Merge branch 'master' into node-4763-community-issue-calendly-personal-access-token-auth-broken
Some checks failed
CI: Python / Checks (push) Has been cancelled

This commit is contained in:
Michael Kret 2026-05-07 14:20:02 +03:00 committed by GitHub
commit 215e0282ff
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
104 changed files with 4687 additions and 485 deletions

View File

@ -45,6 +45,13 @@ runs:
mkdir -p "$PNPM_STORE_PATH"
fi
- name: Configure SafeChain
shell: bash
run: |
# SafeChain only reads configs from this directory https://github.com/AikidoSec/safe-chain#configuration-options-1
mkdir -p "$HOME/.safe-chain"
cp "${{ github.action_path }}/safe-chain.config.json" "$HOME/.safe-chain/config.json"
- name: Install Aikido SafeChain
run: |
VERSION="1.5.1"
@ -54,10 +61,6 @@ runs:
echo "${EXPECTED_SHA256} install-safe-chain.sh" | sha256sum -c -
sh install-safe-chain.sh --ci
rm install-safe-chain.sh
# Exclude first-party @n8n/* packages from SafeChain's minimum-package-age
# filter so freshly-published versions stay visible to every subsequent
# step in the job (install, build, and publish).
echo "SAFE_CHAIN_MINIMUM_PACKAGE_AGE_EXCLUSIONS=@n8n/*,n8n,n8n-containers,n8n-core,n8n-editor-ui,n8n-node-dev,n8n-nodes-base,n8n-playwright,n8n-workflow" >> "$GITHUB_ENV"
shell: bash
- name: Install Dependencies

View File

@ -0,0 +1,16 @@
{
"npm": {
"minimumPackageAgeExclusions": [
"@n8n/*",
"@n8n_io/*",
"n8n",
"n8n-containers",
"n8n-core",
"n8n-editor-ui",
"n8n-node-dev",
"n8n-nodes-base",
"n8n-playwright",
"n8n-workflow"
]
}
}

View File

@ -41,7 +41,12 @@ jobs:
chromatic:
name: Chromatic
needs: filter
if: needs.filter.outputs.design_system == 'true'
# Skip on fork PRs — they don't have access to the Chromatic secret.
# This job is intentionally not in `required-review-checks` needs, so it
# is non-blocking and won't gate merging.
if: >-
needs.filter.outputs.design_system == 'true' &&
github.event.pull_request.head.repo.full_name == github.repository
uses: ./.github/workflows/test-visual-chromatic.yml
with:
ref: ${{ needs.filter.outputs.commit_sha }}
@ -51,7 +56,7 @@ jobs:
# PRs cannot be merged unless this job passes.
required-review-checks:
name: Required Review Checks
needs: [filter, chromatic]
needs: [filter]
if: always()
runs-on: ubuntu-slim
steps:

View File

@ -34,4 +34,4 @@ jobs:
skip: 'release/**'
onlyChanged: true
projectToken: ${{ secrets.CHROMATIC_PROJECT_TOKEN }}
exitZeroOnChanges: false
exitZeroOnChanges: true

View File

@ -170,6 +170,7 @@
"fast-xml-parser": "5.7.0"
},
"patchedDependencies": {
"axios@1.15.0": "patches/axios@1.15.0.patch",
"bull@4.16.4": "patches/bull@4.16.4.patch",
"pdfjs-dist@5.3.31": "patches/pdfjs-dist@5.3.31.patch",
"pkce-challenge@5.0.0": "patches/pkce-challenge@5.0.0.patch",

View File

@ -48,6 +48,13 @@ export const INCOMPATIBLE_WORKFLOW_TOOL_BODY_NODE_TYPES = [
export const AGENT_SCHEDULE_TRIGGER_TYPE = 'schedule';
/**
* Source string recorded on agent executions invoked from a workflow via the
* MessageAnAgent node. Mirrors the pattern set by chat/slack/schedule sources
* so the session detail view can attribute thread origin uniformly.
*/
export const AGENT_WORKFLOW_TRIGGER_TYPE = 'workflow';
export const DEFAULT_AGENT_SCHEDULE_WAKE_UP_PROMPT =
'Automated message: you were triggered on schedule.';

View File

@ -29,6 +29,7 @@ export {
InstanceAiConfirmRequestDto,
type InstanceAiConfirmRequest,
type InstanceAiConfirmRequestKind,
type InstanceAiResourceDecision,
} from './instance-ai/instance-ai-confirm-request.dto';
export { InstanceAiFeedbackRequestDto } from './instance-ai/instance-ai-feedback-request.dto';
export { InstanceAiRenameThreadRequestDto } from './instance-ai/instance-ai-rename-thread-request.dto';

View File

@ -63,7 +63,7 @@ describe('InstanceAiConfirmRequestDto', () => {
['domainAccessDeny', { kind: 'domainAccessDeny' }],
// confirmResourceDecision (store)
[
'resourceDecision with arbitrary decision token',
'resourceDecision with allowed decision token',
{ kind: 'resourceDecision', resourceDecision: 'allowForSession' },
],
// useSetupActions: handleApply
@ -130,6 +130,14 @@ describe('InstanceAiConfirmRequestDto', () => {
expect(result.success).toBe(false);
});
test('resourceDecision rejects persistent daemon-only decisions', () => {
const result = InstanceAiConfirmRequestDto.safeParse({
kind: 'resourceDecision',
resourceDecision: 'alwaysAllow',
});
expect(result.success).toBe(false);
});
test('setupWorkflowTestTrigger without testTriggerNode', () => {
const result = InstanceAiConfirmRequestDto.safeParse({ kind: 'setupWorkflowTestTrigger' });
expect(result.success).toBe(false);

View File

@ -1,6 +1,9 @@
import { z } from 'zod';
import { domainAccessActionSchema } from '../../schemas/instance-ai.schema';
import {
domainAccessActionSchema,
instanceGatewayResourceDecisionSchema,
} from '../../schemas/instance-ai.schema';
/**
* Plain approval/denial. Also carries optional `userInput` for:
@ -46,13 +49,10 @@ const domainAccessDenySchema = z.object({
kind: z.literal('domainAccessDeny'),
});
/** Gateway resource-access decision (inputType='resource-decision'). Approval is implied.
* `resourceDecision` is one of the opaque tokens listed in the request's `options[]` array
* (e.g. `'denyOnce'`, `'allowOnce'`, `'allowForSession'`) the daemon defines the vocabulary,
* so we keep this as a string rather than a fixed enum. */
/** Gateway resource-access decision (inputType='resource-decision'). Approval is implied. */
const resourceDecisionConfirmSchema = z.object({
kind: z.literal('resourceDecision'),
resourceDecision: z.string(),
resourceDecision: instanceGatewayResourceDecisionSchema,
});
/** Per-node credential map: `Record<nodeName, Record<credentialType, credentialId>>`. */
@ -90,3 +90,4 @@ export const InstanceAiConfirmRequestDto = z.discriminatedUnion('kind', [
export type InstanceAiConfirmRequest = z.infer<typeof InstanceAiConfirmRequestDto>;
export type InstanceAiConfirmRequestKind = InstanceAiConfirmRequest['kind'];
export type InstanceAiResourceDecision = z.infer<typeof instanceGatewayResourceDecisionSchema>;

View File

@ -101,6 +101,7 @@ export interface FrontendSettings {
nodeJsVersion: string;
nodeEnv: string | undefined;
concurrency: number;
evaluationConcurrencyLimit: number;
authCookie: {
secure: boolean;
};

View File

@ -324,7 +324,9 @@ export {
domainAccessActionSchema,
domainAccessMetaSchema,
credentialFlowSchema,
gatewayConfirmationRequiredWirePayloadSchema,
gatewayConfirmationRequiredPayloadSchema,
instanceGatewayResourceDecisionSchema,
GATEWAY_CONFIRMATION_REQUIRED_PREFIX,
InstanceAiSendMessageRequest,
InstanceAiEvalExecutionRequest,
@ -401,7 +403,9 @@ export type {
DomainAccessAction,
DomainAccessMeta,
InstanceAiCredentialFlow,
GatewayConfirmationRequiredWirePayload,
GatewayConfirmationRequiredPayload,
InstanceGatewayResourceDecision,
ToolCategory,
InstanceAiWorkflowSetupNode,
PlannedTaskArg,

View File

@ -283,7 +283,14 @@ export type PlannedTaskArg = z.infer<typeof plannedTaskArgSchema>;
/** Protocol prefix used by the daemon to signal a resource-access confirmation is required. */
export const GATEWAY_CONFIRMATION_REQUIRED_PREFIX = 'GATEWAY_CONFIRMATION_REQUIRED::';
export const gatewayConfirmationRequiredPayloadSchema = z.object({
/** Vocabulary of gateway resource-access decisions the client may submit.
 * Deliberately excludes persistent daemon-only decisions (e.g. `alwaysAllow`),
 * which the confirm-request DTO must reject. */
export const instanceGatewayResourceDecisionSchema = z.enum([
'denyOnce',
'allowOnce',
'allowForSession',
]);
/** One of the allowed decision tokens above. */
export type InstanceGatewayResourceDecision = z.infer<typeof instanceGatewayResourceDecisionSchema>;
export const gatewayConfirmationRequiredWirePayloadSchema = z.object({
toolGroup: z.string(),
resource: z.string(),
description: z.string(),
@ -291,6 +298,15 @@ export const gatewayConfirmationRequiredPayloadSchema = z.object({
options: z.array(z.string()),
});
/** Wire-format payload as emitted by the daemon: `options` stays free-form
 * strings at this layer. */
export type GatewayConfirmationRequiredWirePayload = z.infer<
typeof gatewayConfirmationRequiredWirePayloadSchema
>;
/** Same shape with `options` narrowed to the known decision vocabulary. */
export const gatewayConfirmationRequiredPayloadSchema =
gatewayConfirmationRequiredWirePayloadSchema.extend({
options: z.array(instanceGatewayResourceDecisionSchema),
});
export type GatewayConfirmationRequiredPayload = z.infer<
typeof gatewayConfirmationRequiredPayloadSchema
>;

View File

@ -41,6 +41,21 @@ describe('isProtectedSettingsPath', () => {
it('catches paths with trailing slash', () => {
expect(isProtectedSettingsPath(settingsDir + '/')).toBe(true);
});
it('matches case variants on case-insensitive platforms', () => {
// Temporarily masquerade as darwin so the case-folding branch runs.
const originalPlatform = process.platform;
Object.defineProperty(process, 'platform', { value: 'darwin', configurable: true });
try {
const caseMismatched = path.join(
settingsDir.replace('.n8n-gateway', '.N8N-Gateway'),
'settings.json',
);
expect(isProtectedSettingsPath(caseMismatched)).toBe(true);
} finally {
// Always restore the real platform, even if the assertion throws.
Object.defineProperty(process, 'platform', { value: originalPlatform, configurable: true });
}
});
});
describe('parseConfig — allowedOrigins', () => {

View File

@ -257,8 +257,12 @@ export function getSettingsDir(): string {
* Used to prevent computer-use tools from modifying their own configuration.
*/
export function isProtectedSettingsPath(absolutePath: string): boolean {
const dir = path.resolve(getSettingsDir());
const target = path.resolve(absolutePath);
let dir = path.resolve(getSettingsDir());
let target = path.resolve(absolutePath);
if (process.platform === 'darwin' || process.platform === 'win32') {
dir = dir.toLowerCase();
target = target.toLowerCase();
}
return target === dir || target.startsWith(dir + path.sep);
}

View File

@ -108,6 +108,15 @@ describe('copyFileTool', () => {
).rejects.toThrow('escapes');
});
it('rejects excluded directories on source', async () => {
await expect(
copyFileTool.execute(
{ sourcePath: 'node_modules/pkg/index.js', destinationPath: 'dst.txt' },
CONTEXT,
),
).rejects.toThrow('excluded');
});
it('rejects path traversal on destination', async () => {
mockMkdir();

View File

@ -4,7 +4,7 @@ import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { formatCallToolResult } from '../utils';
import { buildFilesystemResource, resolveSafePath } from './fs-utils';
import { buildFilesystemResource, resolveReadablePath, resolveSafePath } from './fs-utils';
const inputSchema = z.object({
sourcePath: z.string().describe('Source file path relative to root'),
@ -34,7 +34,7 @@ export const copyFileTool: ToolDefinition<typeof inputSchema> = {
];
},
async execute({ sourcePath, destinationPath }, { dir }) {
const resolvedSrc = await resolveSafePath(dir, sourcePath);
const resolvedSrc = await resolveReadablePath(dir, sourcePath);
const resolvedDest = await resolveSafePath(dir, destinationPath);
await fs.mkdir(path.dirname(resolvedDest), { recursive: true });

View File

@ -79,6 +79,41 @@ describe('editFileTool', () => {
});
});
describe('getAffectedResources', () => {
it('declares both read and write access for the edited file', async () => {
const resources = await editFileTool.getAffectedResources(
{ filePath: 'src/index.ts', oldString: 'foo', newString: 'bar' },
CONTEXT,
);
expect(resources).toEqual([
{
toolGroup: 'filesystemRead',
resource: '/base/src/index.ts',
description: 'Read file: src/index.ts',
},
{
toolGroup: 'filesystemWrite',
resource: '/base/src/index.ts',
description: 'Edit file: src/index.ts',
},
]);
});
it('rejects excluded paths for the read phase', async () => {
await expect(
editFileTool.getAffectedResources(
{
filePath: 'node_modules/pkg/index.js',
oldString: 'foo',
newString: 'bar',
},
CONTEXT,
),
).rejects.toThrow('excluded from filesystem reads');
});
});
describe('execute', () => {
it('replaces the first occurrence of oldString with newString', async () => {
mockStat(100);
@ -158,5 +193,19 @@ describe('editFileTool', () => {
),
).rejects.toThrow('escapes');
});
it.each([
'node_modules/pkg/index.js',
'Node_Modules/pkg/index.js',
'.git/config',
'dist/out.js',
])('rejects edit reads under excluded directory %s', async (filePath) => {
await expect(
editFileTool.execute({ filePath, oldString: 'foo', newString: 'bar' }, CONTEXT),
).rejects.toThrow('excluded from filesystem reads');
expect(fs.stat).not.toHaveBeenCalled();
expect(fs.readFile).not.toHaveBeenCalled();
expect(fs.writeFile).not.toHaveBeenCalled();
});
});
});

View File

@ -4,7 +4,7 @@ import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { formatCallToolResult } from '../utils';
import { MAX_FILE_SIZE } from './constants';
import { buildFilesystemResource, resolveSafePath } from './fs-utils';
import { buildFilesystemResource, resolveReadablePath, resolveSafePath } from './fs-utils';
const inputSchema = z.object({
filePath: z.string().describe('File path relative to root'),
@ -20,26 +20,28 @@ export const editFileTool: ToolDefinition<typeof inputSchema> = {
annotations: {},
async getAffectedResources({ filePath }, { dir }) {
return [
await buildFilesystemResource(dir, filePath, 'filesystemRead', `Read file: ${filePath}`),
await buildFilesystemResource(dir, filePath, 'filesystemWrite', `Edit file: ${filePath}`),
];
},
async execute({ filePath, oldString, newString }, { dir }) {
	// Resolve the read path first: the excluded-directory policy applies to
	// the read phase and must reject before any filesystem access happens.
	const readPath = await resolveReadablePath(dir, filePath);
	const writePath = await resolveSafePath(dir, filePath);

	const { size } = await fs.stat(readPath);
	if (size > MAX_FILE_SIZE) {
		throw new Error(
			`File too large: ${size} bytes (max ${MAX_FILE_SIZE} bytes). Use write_file to replace the entire content.`,
		);
	}

	const content = await fs.readFile(readPath, 'utf-8');
	if (!content.includes(oldString)) {
		throw new Error(`oldString not found in file: ${filePath}`);
	}

	// Only the first occurrence is replaced (String.prototype.replace with a
	// plain-string pattern).
	await fs.writeFile(writePath, content.replace(oldString, newString), 'utf-8');
	return formatCallToolResult({ path: filePath });
},

View File

@ -1,7 +1,12 @@
import type { Stats } from 'node:fs';
import * as fs from 'node:fs/promises';
import { buildFilesystemResource, resolveSafePath } from './fs-utils';
import {
buildFilesystemResource,
isLikelyBinaryContent,
resolveReadablePath,
resolveSafePath,
} from './fs-utils';
import * as config from '../../config';
jest.mock('node:fs/promises');
@ -153,6 +158,38 @@ describe('resolveSafePath', () => {
});
});
describe('isLikelyBinaryContent', () => {
it('treats text as valid when a multibyte character crosses the sample boundary', () => {
const buffer = Buffer.concat([Buffer.alloc(8191, 'a'), Buffer.from('é')]);
expect(isLikelyBinaryContent(buffer)).toBe(false);
});
it('detects null bytes outside the sample boundary', () => {
const buffer = Buffer.concat([Buffer.alloc(8192, 'a'), Buffer.from([0])]);
expect(isLikelyBinaryContent(buffer)).toBe(true);
});
});
describe('resolveReadablePath', () => {
beforeEach(() => {
jest.resetAllMocks();
jest.mocked(fs.lstat).mockRejectedValue(enoent());
});
it('throws when a symlink resolves into an excluded directory segment', async () => {
mockRealpath([
[BASE, BASE],
[`${BASE}/link`, `${BASE}/node_modules`],
]);
await expect(resolveReadablePath(BASE, 'link/pkg/index.js')).rejects.toThrow(
'excluded from filesystem reads',
);
});
});
describe('buildFilesystemResource — settings self-protection', () => {
const settingsDir = config.getSettingsDir();
const settingsFile = config.getSettingsFilePath();
@ -196,4 +233,44 @@ describe('buildFilesystemResource — settings self-protection', () => {
);
expect(result.resource).toBe('/base/src/index.ts');
});
it('throws for filesystemRead targeting an excluded directory segment', async () => {
mockRealpath([[BASE, BASE]]);
await expect(
buildFilesystemResource(BASE, 'node_modules/pkg/index.js', 'filesystemRead', 'Read file'),
).rejects.toThrow('excluded from filesystem reads');
});
it('throws for filesystemRead when a symlink targets an excluded directory segment', async () => {
mockRealpath([
[BASE, BASE],
[`${BASE}/link`, `${BASE}/node_modules`],
]);
await expect(
buildFilesystemResource(BASE, 'link/pkg/index.js', 'filesystemRead', 'Read file'),
).rejects.toThrow('excluded from filesystem reads');
});
it('matches excluded directory segments case-insensitively', async () => {
mockRealpath([[BASE, BASE]]);
await expect(
buildFilesystemResource(BASE, 'Node_Modules/pkg/index.js', 'filesystemRead', 'Read file'),
).rejects.toThrow('excluded from filesystem reads');
});
it('does not apply excluded segment policy to filesystemWrite resources', async () => {
mockRealpath([[BASE, BASE]]);
const result = await buildFilesystemResource(
BASE,
'dist/generated.js',
'filesystemWrite',
'Write generated file',
);
expect(result.resource).toBe('/base/dist/generated.js');
});
});

View File

@ -1,11 +1,15 @@
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { TextDecoder } from 'node:util';
import { isProtectedSettingsPath } from '../../config';
import type { AffectedResource } from '../types';
// NOTE(review): appears to cap how many entries a scan emits — confirm in scanDirectory.
const MAX_ENTRIES = 10_000;
// NOTE(review): presumably the default recursion depth for tree scans — confirm at call sites.
const DEFAULT_MAX_DEPTH = 8;
// Number of leading bytes sampled for the control-character ratio check.
const BINARY_CHECK_SIZE = 8192;
// Content whose sampled disallowed-control-char ratio exceeds this is treated as binary.
const MAX_CONTROL_CHAR_RATIO = 0.3;
// fatal:true makes decode() throw on malformed UTF-8 (used for binary detection).
const utf8Decoder = new TextDecoder('utf-8', { fatal: true });
export const EXCLUDED_DIRS = new Set([
'node_modules',
@ -25,6 +29,9 @@ export const EXCLUDED_DIRS = new Set([
'.output',
'.svelte-kit',
]);
// Lower-cased mirror of EXCLUDED_DIRS so segment checks are case-insensitive
// (see isExcludedDirName).
const NORMALIZED_EXCLUDED_DIRS = new Set(
[...EXCLUDED_DIRS].map((segment) => segment.toLowerCase()),
);
export interface TreeEntry {
path: string;
@ -82,7 +89,7 @@ export async function scanDirectory(
break;
}
if (EXCLUDED_DIRS.has(entry.name) && entry.isDirectory()) continue;
if (isExcludedDirName(entry.name) && entry.isDirectory()) continue;
if (entry.name.startsWith('.') && !isAllowedDotFile(entry.name)) continue;
const entryRelPath = relativePath ? `${relativePath}/${entry.name}` : entry.name;
@ -131,6 +138,47 @@ function isAllowedDotFile(name: string): boolean {
return allowed.has(name);
}
/** Throw if any path segment between `basePath` and `absolutePath` is an
 * excluded directory name (case-insensitive). */
export function assertNoExcludedSegments(absolutePath: string, basePath: string): void {
	const relative = path.relative(path.resolve(basePath), path.resolve(absolutePath));
	for (const segment of relative.split(path.sep)) {
		// Empty segments (e.g. when target === base) are skipped.
		if (segment && isExcludedDirName(segment)) {
			throw new Error(`Access denied: "${segment}" is excluded from filesystem reads`);
		}
	}
}
/** Case-insensitive membership test against the excluded-directory list. */
export function isExcludedDirName(segment: string): boolean {
	const normalized = segment.toLowerCase();
	return NORMALIZED_EXCLUDED_DIRS.has(normalized);
}
/**
 * Heuristically decide whether a buffer holds binary (non-text) content.
 * Checks in order: empty → text; any null byte → binary; malformed UTF-8 →
 * binary; otherwise binary when the leading sample carries too high a ratio
 * of disallowed control characters.
 */
export function isLikelyBinaryContent(buffer: Buffer): boolean {
	if (buffer.length === 0) return false;
	// A null byte anywhere in the buffer (not just the sample) marks it binary.
	if (buffer.includes(0)) return true;
	// The fatal decoder throws on any invalid UTF-8 sequence.
	try {
		utf8Decoder.decode(buffer);
	} catch {
		return true;
	}
	const sample = buffer.subarray(0, Math.min(BINARY_CHECK_SIZE, buffer.length));
	let disallowed = 0;
	for (const byte of sample) {
		// Tab (9), LF (10), FF (12) and CR (13) are legitimate text controls.
		const isTextControl = byte === 9 || byte === 10 || byte === 12 || byte === 13;
		if (byte < 32 && !isTextControl) disallowed++;
	}
	return disallowed / sample.length > MAX_CONTROL_CHAR_RATIO;
}
/** Result of resolving a user-supplied path against the sandbox base. */
interface ResolvedSafePath {
// Logical absolute path (symlinks NOT resolved) — what callers receive.
absolutePath: string;
// realpath() of the base directory.
realBasePath: string;
// Fully symlink-resolved target path.
resolvedPath: string;
}
/**
* Resolve a path safely within the base directory.
*
@ -150,7 +198,10 @@ function isAllowedDotFile(name: string): boolean {
* Returns the logical absolute path (without resolving symlinks), so the
* caller never needs to know that a symlink is involved.
*/
export async function resolveSafePath(basePath: string, relativePath: string): Promise<string> {
async function resolveSafePathDetails(
basePath: string,
relativePath: string,
): Promise<ResolvedSafePath> {
const realBase = await fs.realpath(basePath);
const absolute = path.resolve(basePath, relativePath);
@ -199,7 +250,22 @@ export async function resolveSafePath(basePath: string, relativePath: string): P
throw new Error(`Access denied: cannot access "${relativePath}"`);
}
return absolute;
return { absolutePath: absolute, realBasePath: realBase, resolvedPath: current };
}
/** Resolve `relativePath` inside `basePath`, returning only the logical absolute path. */
export async function resolveSafePath(basePath: string, relativePath: string): Promise<string> {
	const details = await resolveSafePathDetails(basePath, relativePath);
	return details.absolutePath;
}
/**
 * Like resolveSafePath, but additionally rejects paths whose logical or
 * symlink-resolved form contains an excluded directory segment.
 */
export async function resolveReadablePath(basePath: string, relativePath: string): Promise<string> {
	const details = await resolveSafePathDetails(basePath, relativePath);
	// Check the logical path first, then the fully resolved path, so a symlink
	// cannot smuggle reads into an excluded directory.
	assertNoExcludedSegments(details.absolutePath, basePath);
	assertNoExcludedSegments(details.resolvedPath, details.realBasePath);
	return details.absolutePath;
}
/**
@ -213,7 +279,10 @@ export async function buildFilesystemResource(
toolGroup: 'filesystemRead' | 'filesystemWrite',
description: string,
): Promise<AffectedResource> {
const absolutePath = await resolveSafePath(dir, inputPath);
const absolutePath =
toolGroup === 'filesystemRead'
? await resolveReadablePath(dir, inputPath)
: await resolveSafePath(dir, inputPath);
return { toolGroup, resource: absolutePath, description };
}

View File

@ -1,7 +1,7 @@
import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { buildFilesystemResource, resolveSafePath, scanDirectory } from './fs-utils';
import { buildFilesystemResource, resolveReadablePath, scanDirectory } from './fs-utils';
const inputSchema = z.object({
dirPath: z.string().describe('Directory path relative to root (use "." for root)'),
@ -24,7 +24,7 @@ export const getFileTreeTool: ToolDefinition<typeof inputSchema> = {
];
},
async execute({ dirPath, maxDepth }, { dir }) {
const resolvedDir = await resolveSafePath(dir, dirPath || '.');
const resolvedDir = await resolveReadablePath(dir, dirPath || '.');
const depth = maxDepth ?? 2;
const { rootPath, tree, truncated } = await scanDirectory(resolvedDir, depth);

View File

@ -2,7 +2,7 @@ import * as path from 'node:path';
import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { buildFilesystemResource, resolveSafePath, scanDirectory } from './fs-utils';
import { buildFilesystemResource, resolveReadablePath, scanDirectory } from './fs-utils';
const inputSchema = z.object({
dirPath: z.string().describe('Directory path relative to root'),
@ -29,7 +29,7 @@ export const listFilesTool: ToolDefinition<typeof inputSchema> = {
];
},
async execute({ dirPath, type, maxResults }, { dir }) {
const resolvedDir = await resolveSafePath(dir, dirPath || '.');
const resolvedDir = await resolveReadablePath(dir, dirPath || '.');
// maxDepth=0 → immediate children only, no recursion
const { tree } = await scanDirectory(resolvedDir, 0);

View File

@ -119,6 +119,15 @@ describe('moveFileTool', () => {
).rejects.toThrow('escapes');
});
it('rejects excluded directories on source', async () => {
await expect(
moveFileTool.execute(
{ sourcePath: 'node_modules/pkg/index.js', destinationPath: 'dest.txt' },
CONTEXT,
),
).rejects.toThrow('excluded');
});
it('rejects path traversal on destination', async () => {
mockMkdir();

View File

@ -4,7 +4,7 @@ import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { formatCallToolResult } from '../utils';
import { buildFilesystemResource, resolveSafePath } from './fs-utils';
import { buildFilesystemResource, resolveReadablePath, resolveSafePath } from './fs-utils';
const inputSchema = z.object({
sourcePath: z.string().describe('Source path relative to root (file or directory)'),
@ -34,7 +34,7 @@ export const moveFileTool: ToolDefinition<typeof inputSchema> = {
];
},
async execute({ sourcePath, destinationPath }, { dir }) {
const resolvedSrc = await resolveSafePath(dir, sourcePath);
const resolvedSrc = await resolveReadablePath(dir, sourcePath);
const resolvedDest = await resolveSafePath(dir, destinationPath);
await fs.mkdir(path.dirname(resolvedDest), { recursive: true });

View File

@ -139,6 +139,15 @@ describe('readFileTool', () => {
);
});
it('rejects binary files without null bytes', async () => {
mockStat(100);
mockReadFile(Buffer.from([0xff, 0xfe, 0xfd, 0xfc]));
await expect(readFileTool.execute({ filePath: 'binary.dat' }, CONTEXT)).rejects.toThrow(
'Binary file',
);
});
it('rejects files larger than 512KB', async () => {
mockStat(600 * 1024);
@ -153,6 +162,15 @@ describe('readFileTool', () => {
).rejects.toThrow('escapes');
});
it.each(['node_modules/foo/.env', 'Node_Modules/foo/.env', '.git/config', 'dist/bundle.js'])(
'rejects direct reads under excluded directory %s',
async (filePath) => {
await expect(readFileTool.execute({ filePath }, CONTEXT)).rejects.toThrow(
'excluded from filesystem reads',
);
},
);
it.each([
{ startLine: undefined, maxLines: undefined },
{ startLine: 1, maxLines: 5 },

View File

@ -4,9 +4,8 @@ import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { formatCallToolResult } from '../utils';
import { MAX_FILE_SIZE } from './constants';
import { buildFilesystemResource, resolveSafePath } from './fs-utils';
import { buildFilesystemResource, isLikelyBinaryContent, resolveReadablePath } from './fs-utils';
const DEFAULT_MAX_LINES = 200;
const BINARY_CHECK_SIZE = 8192;
const inputSchema = z.object({
filePath: z.string().describe('File path relative to root'),
@ -25,7 +24,7 @@ export const readFileTool: ToolDefinition<typeof inputSchema> = {
];
},
async execute({ filePath, startLine, maxLines }, { dir }) {
const resolvedPath = await resolveSafePath(dir, filePath);
const resolvedPath = await resolveReadablePath(dir, filePath);
const stat = await fs.stat(resolvedPath);
if (stat.size > MAX_FILE_SIZE) {
@ -34,11 +33,10 @@ export const readFileTool: ToolDefinition<typeof inputSchema> = {
);
}
const buffer = await fs.readFile(resolvedPath);
const fileContent = await fs.readFile(resolvedPath);
const buffer = Buffer.isBuffer(fileContent) ? fileContent : Buffer.from(fileContent);
// Binary detection: check first 8KB for null bytes
const checkSlice = buffer.subarray(0, Math.min(BINARY_CHECK_SIZE, buffer.length));
if (checkSlice.includes(0)) {
if (isLikelyBinaryContent(buffer)) {
throw new Error('Binary file detected — cannot read binary files');
}

View File

@ -212,6 +212,31 @@ describe('searchFilesTool', () => {
).rejects.toThrow('escapes');
});
it.each(['node_modules', 'Node_Modules', '.git', 'dist'])(
'rejects direct search roots under excluded directory %s',
async (dirPath) => {
await expect(searchFilesTool.execute({ dirPath, query: 'foo' }, CONTEXT)).rejects.toThrow(
'excluded from filesystem reads',
);
},
);
it('skips likely binary files', async () => {
(fs.readdir as jest.Mock).mockResolvedValue([dirent('binary.dat', false)]);
mockStat();
(fs.readFile as jest.Mock).mockResolvedValue(Buffer.from([0xff, 0xfe, 0xfd, 0xfc]));
const result = await searchFilesTool.execute({ dirPath: '.', query: 'foo' }, CONTEXT);
// eslint-disable-next-line n8n-local-rules/no-uncaught-json-parse
const data = JSON.parse(textOf(result)) as {
matches: unknown[];
totalMatches: number;
};
expect(data.matches).toHaveLength(0);
expect(data.totalMatches).toBe(0);
});
it.each([
{ query: 'foo', ignoreCase: undefined, label: 'case-sensitive' },
{ query: 'foo', ignoreCase: true, label: 'case-insensitive' },

View File

@ -5,7 +5,12 @@ import { z } from 'zod';
import type { ToolDefinition } from '../types';
import { formatCallToolResult } from '../utils';
import { MAX_FILE_SIZE } from './constants';
import { EXCLUDED_DIRS, buildFilesystemResource, resolveSafePath } from './fs-utils';
import {
buildFilesystemResource,
isExcludedDirName,
isLikelyBinaryContent,
resolveReadablePath,
} from './fs-utils';
const inputSchema = z.object({
dirPath: z.string().describe('Directory to search in'),
@ -26,7 +31,7 @@ export const searchFilesTool: ToolDefinition<typeof inputSchema> = {
];
},
async execute({ dirPath, query, filePattern, ignoreCase, maxResults }, { dir }) {
const resolvedDir = await resolveSafePath(dir, dirPath);
const resolvedDir = await resolveReadablePath(dir, dirPath);
const limit = maxResults ?? 50;
const flags = ignoreCase ? 'gi' : 'g';
const regex = new RegExp(escapeRegex(query), flags);
@ -44,7 +49,10 @@ export const searchFilesTool: ToolDefinition<typeof inputSchema> = {
const stat = await fs.stat(fullPath);
if (stat.size > MAX_FILE_SIZE) continue;
const content = await fs.readFile(fullPath, 'utf-8');
const fileContent = await fs.readFile(fullPath);
const buffer = Buffer.isBuffer(fileContent) ? fileContent : Buffer.from(fileContent);
if (isLikelyBinaryContent(buffer)) continue;
const content = buffer.toString('utf-8');
const lines = content.split('\n');
for (let i = 0; i < lines.length; i++) {
@ -77,7 +85,7 @@ async function collectFiles(
const entries = await fs.readdir(dir, { withFileTypes: true });
for (const entry of entries) {
if (EXCLUDED_DIRS.has(entry.name) && entry.isDirectory()) continue;
if (isExcludedDirName(entry.name) && entry.isDirectory()) continue;
const fullPath = path.join(dir, entry.name);
const relativePath = path.relative(basePath, fullPath);

View File

@ -1,10 +1,18 @@
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import { GlobalConfig } from '@n8n/config';
import type { SqliteConfig } from '@n8n/config';
import { Container } from '@n8n/di';
import { In, LessThan, And, Not } from '@n8n/typeorm';
import type { SelectQueryBuilder } from '@n8n/typeorm';
import { In, LessThan, LessThanOrEqual, And, Not } from '@n8n/typeorm';
import { mock } from 'jest-mock-extended';
import { BinaryDataService } from 'n8n-core';
import type { IRunExecutionData, IWorkflowBase } from 'n8n-workflow';
import { nanoid } from 'nanoid';
import { ExecutionEntity } from '../../entities';
import type { IExecutionResponse } from '../../entities/types-db';
import { mockEntityManager } from '../../utils/test-utils/mock-entity-manager';
import { mockInstance } from '../../utils/test-utils/mock-instance';
import { ExecutionRepository } from '../execution.repository';
const GREATER_THAN_MAX_UPDATE_THRESHOLD = 901;
@ -14,6 +22,10 @@ const GREATER_THAN_MAX_UPDATE_THRESHOLD = 901;
*/
describe('ExecutionRepository', () => {
const entityManager = mockEntityManager(ExecutionEntity);
const globalConfig = mockInstance(GlobalConfig, {
logging: { outputs: ['console'], scopes: [] },
});
mockInstance(BinaryDataService);
const executionRepository = Container.get(ExecutionRepository);
beforeEach(() => {
@ -366,6 +378,18 @@ describe('ExecutionRepository', () => {
await executionRepository.markAsCrashed(manyExecutionsToMarkAsCrashed);
expect(entityManager.update).toBeCalledTimes(2);
});
test('should clear waitTill when marking executions as crashed', async () => {
const executionIds = ['1', '2'];
await executionRepository.markAsCrashed(executionIds);
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: In(executionIds) },
expect.objectContaining({ status: 'crashed', waitTill: null }),
);
});
});
describe('stopDuringRun', () => {
@ -427,7 +451,7 @@ describe('ExecutionRepository', () => {
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: executionId },
{ status: 'running', startedAt: expect.any(Date) },
{ status: 'running', startedAt: expect.any(Date), waitTill: null },
);
expect(result).toBeInstanceOf(Date);
});
@ -444,9 +468,133 @@ describe('ExecutionRepository', () => {
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: executionId },
{ status: 'running', startedAt: existingStartedAt },
{ status: 'running', startedAt: existingStartedAt, waitTill: null },
);
expect(result).toBe(existingStartedAt);
});
});
describe('cancelMany', () => {
test('should clear waitTill when canceling executions', async () => {
const executionIds = ['1', '2', '3'];
await executionRepository.cancelMany(executionIds);
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: In(executionIds) },
expect.objectContaining({ status: 'canceled', waitTill: null }),
);
});
});
describe('stopBeforeRun', () => {
test('should clear waitTill when stopping execution before run', async () => {
const execution = mock<IExecutionResponse>({
id: '1',
status: 'waiting',
waitTill: new Date('2025-01-01T00:00:00.000Z'),
});
await executionRepository.stopBeforeRun(execution);
expect(execution.waitTill).toBeNull();
expect(execution.status).toBe('canceled');
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: '1' },
expect.objectContaining({ status: 'canceled', waitTill: null }),
);
});
});
describe('getWaitingExecutions', () => {
const mockDate = new Date('2023-12-28 12:34:56.789Z');
beforeAll(() => jest.useFakeTimers().setSystemTime(mockDate));
afterAll(() => jest.useRealTimers());
test.each(['sqlite', 'postgresdb'] as const)(
'on %s, should only return executions with status=waiting',
async (dbType) => {
globalConfig.database.type = dbType;
entityManager.find.mockResolvedValueOnce([]);
await executionRepository.getWaitingExecutions();
expect(entityManager.find).toHaveBeenCalledWith(ExecutionEntity, {
order: { waitTill: 'ASC' },
select: ['id', 'waitTill'],
where: {
status: 'waiting',
waitTill: LessThanOrEqual(
dbType === 'sqlite'
? '2023-12-28 12:36:06.789'
: new Date('2023-12-28T12:36:06.789Z'),
),
},
});
},
);
});
describe('deleteExecutionsByFilter', () => {
test('should delete binary data', async () => {
const workflowId = nanoid();
const binaryDataService = Container.get(BinaryDataService);
jest.spyOn(executionRepository, 'createQueryBuilder').mockReturnValue(
mock<SelectQueryBuilder<ExecutionEntity>>({
select: jest.fn().mockReturnThis(),
andWhere: jest.fn().mockReturnThis(),
getMany: jest.fn().mockResolvedValue([{ id: '1', workflowId }]),
}),
);
await executionRepository.deleteExecutionsByFilter({
filters: { id: '1' },
accessibleWorkflowIds: ['1'],
deleteConditions: { ids: ['1'] },
});
expect(binaryDataService.deleteMany).toHaveBeenCalledWith([
{ type: 'execution', executionId: '1', workflowId },
]);
});
});
describe('updateExistingExecution', () => {
test.each(['sqlite', 'postgresdb'] as const)(
'should update execution and data in transaction on %s',
async (dbType) => {
globalConfig.database.type = dbType;
globalConfig.database.sqlite = mock<SqliteConfig>({ poolSize: 1 });
const executionId = '1';
const execution = mock<IExecutionResponse>({
id: executionId,
data: mock<IRunExecutionData>(),
workflowData: mock<IWorkflowBase>(),
status: 'success',
});
const txCallback = jest.fn();
entityManager.transaction.mockImplementation(async (fn: unknown) => {
await (fn as (em: typeof entityManager) => Promise<unknown>)(entityManager);
txCallback();
});
entityManager.update.mockResolvedValue({ affected: 1, raw: [], generatedMaps: [] });
await executionRepository.updateExistingExecution(executionId, execution);
expect(entityManager.transaction).toHaveBeenCalled();
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: executionId },
expect.objectContaining({ status: 'success' }),
);
expect(txCallback).toHaveBeenCalledTimes(1);
},
);
});
});

View File

@ -363,6 +363,7 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
{
status: 'crashed',
stoppedAt: new Date(),
waitTill: null,
},
);
this.logger.info('Marked executions as `crashed`', { executionIds });
@ -382,7 +383,7 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
await manager.update(
ExecutionEntity,
{ id: executionId },
{ status: 'running', startedAt: effectiveStartedAt },
{ status: 'running', startedAt: effectiveStartedAt, waitTill: null },
);
return effectiveStartedAt;
@ -608,7 +609,7 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
const waitTill = new Date(Date.now() + 70000);
const where: FindOptionsWhere<ExecutionEntity> = {
waitTill: LessThanOrEqual(waitTill),
status: Not('crashed'),
status: 'waiting',
};
const dbType = this.globalConfig.database.type;
@ -783,10 +784,11 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
async stopBeforeRun(execution: IExecutionResponse) {
execution.status = 'canceled';
execution.stoppedAt = new Date();
execution.waitTill = null;
await this.update(
{ id: execution.id },
{ status: execution.status, stoppedAt: execution.stoppedAt },
{ status: execution.status, stoppedAt: execution.stoppedAt, waitTill: execution.waitTill },
);
return execution;
@ -813,7 +815,10 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
}
async cancelMany(executionIds: string[]) {
await this.update({ id: In(executionIds) }, { status: 'canceled', stoppedAt: new Date() });
await this.update(
{ id: In(executionIds) },
{ status: 'canceled', stoppedAt: new Date(), waitTill: null },
);
}
// ----------------------------------

View File

@ -6,13 +6,13 @@
## Rule Details
The `overrides` field in `package.json` lets a package force specific versions of its (transitive) dependencies. In the context of n8n community nodes this is dangerous:
The `overrides` field in `package.json` forces specific versions of (transitive) dependencies. n8n installs each community package into an isolated `node_modules` tree (peer deps stripped before install, `require()` walks up from each node's compiled file), so an override in one node only affects that node's own resolution — it does **not** bleed into other nodes or into n8n core. The rule bans the field anyway because:
- Community nodes are installed into a shared n8n runtime alongside other nodes. Overriding a shared library (e.g. `axios`, `@langchain/core`, `minimatch`) can silently substitute an incompatible version for every other node that depends on it, causing hard-to-diagnose runtime failures.
- Community nodes are distributed as pre-built packages with their dependencies already bundled or declared as `peerDependencies`. Any version pinning that the node actually needs should happen during development, not at install time on the user's n8n instance.
- `overrides` is frequently copy-pasted from an unrelated internal project and is almost never intentional in a community node.
- **Almost always unintentional.** In practice, `overrides` blocks in community nodes are copy-pasted boilerplate from unrelated projects, sometimes alongside an empty `dependencies` so the override is a literal no-op.
- **No useful effect today.** Because of isolation, a maintainer who believes their override coordinates versions across nodes is wrong about what it does. The block is dead weight at best, actively misleading at worst.
- **Future-proofing.** If the install layout ever moves toward hoisting or partial sharing, today's "harmless" overrides start affecting other nodes' resolution. Banning the field now keeps that change safe to make.
If you have a genuine compatibility need, bundle the dependency into the published artifact or declare it via `peerDependencies` instead.
Most community nodes do not need third-party runtime libraries at all. n8n core already provides HTTP requests (`this.helpers.httpRequest`, `this.helpers.httpRequestWithAuthentication`), credential resolution, binary data helpers, and other common building blocks via the execute context — these should be the default. `dependencies` and `peerDependencies` are restricted by [`no-runtime-dependencies`](no-runtime-dependencies.md) and [`valid-peer-dependencies`](valid-peer-dependencies.md) respectively, so neither is a workaround for `overrides`.
## Examples

View File

@ -12,7 +12,7 @@ export const NoOverridesFieldRule = createRule({
},
messages: {
overridesForbidden:
'The "overrides" field is not allowed in community node packages. Dependency overrides can introduce incompatible versions of shared libraries into the n8n runtime and cause conflicts with other nodes.',
'The "overrides" field is not allowed in community node packages. Each community package installs into an isolated dependency tree, so overrides do not affect other nodes or n8n core — in practice they are copy-pasted boilerplate with no useful effect. Use the helpers on the execute context (this.helpers.httpRequest, etc.) instead; most community nodes do not need third-party runtime libraries.',
},
schema: [],
},

View File

@ -270,8 +270,8 @@ Two options:
The static key is used for all requests — no pairing/session upgrade.
- **Dynamic (pairing → session key)**:
1. `POST /instance-ai/gateway/create-link` (requires session auth) →
returns `{ token, command }`. The token is a **one-time pairing token**
(5-min TTL).
returns `{ token, command, expiresAt, ttlSeconds }`. The token is a
**one-time pairing token** (5-min TTL).
2. Daemon calls `POST /instance-ai/gateway/init` with the pairing token →
server consumes the token and returns `{ ok: true, sessionKey }`.
3. All subsequent requests (SSE, response) use the **session key** instead
@ -289,6 +289,8 @@ create-link → pairingToken (5 min TTL, single-use)
This prevents token replay: the pairing token is visible in terminal output
and `ps aux`, but it becomes useless after the first successful `init` call.
The resulting session key has no time-based expiry and remains valid until
explicit disconnect/revocation.
All key comparisons use `timingSafeEqual()` to prevent timing attacks.
---

View File

@ -42,6 +42,7 @@ import { stringifyError, truncate } from './redact';
import { createStubServices, defaultNodesJsonPath, type StubServiceHandle } from './stub-services';
import type { SimpleWorkflow } from '../../../ai-workflow-builder.ee/evaluations/evaluators/pairwise';
import { registerWithMastra } from '../../src/agent/register-with-mastra';
import { MAX_STEPS } from '../../src/constants/max-steps';
import type { InstanceAiEventBus, StoredEvent } from '../../src/event-bus';
import type { Logger } from '../../src/logger';
import { executeResumableStream } from '../../src/runtime/resumable-stream-executor';
@ -141,7 +142,11 @@ export async function buildInProcess(
const started = Date.now();
const timeoutMs = options.timeoutMs ?? 20 * 60 * 1000;
const modelId: ModelConfig = options.modelId ?? 'anthropic/claude-sonnet-4-6';
const maxSteps = options.maxSteps ?? 30;
// Match production: builds run with the same MAX_STEPS.BUILDER cap as
// `build-workflow-agent.tool.ts` uses inside the orchestrator. Halving
// the budget for evals makes the harness run out of steps on examples
// that production would complete, inflating `no_workflow_built` rates.
const maxSteps = options.maxSteps ?? MAX_STEPS.BUILDER;
const interactivity = {
askUserCount: 0,

View File

@ -1,3 +1,5 @@
import type { ToolsInput } from '@mastra/core/agent';
jest.mock('@mastra/core/agent', () => ({
Agent: jest.fn().mockImplementation(function Agent(
this: { __registerMastra?: jest.Mock } & Record<string, unknown>,
@ -62,16 +64,28 @@ const { ToolSearchProcessor } =
const { Agent } =
// eslint-disable-next-line @typescript-eslint/no-require-imports
require('@mastra/core/agent') as { Agent: jest.Mock };
const { createToolsFromLocalMcpServer } =
// eslint-disable-next-line @typescript-eslint/no-require-imports
require('../../tools/filesystem/create-tools-from-mcp-server') as {
createToolsFromLocalMcpServer: jest.Mock;
};
function createMcpManagerStub() {
function createMcpManagerStub(regularTools: ToolsInput = {}, browserTools: ToolsInput = {}) {
return {
getRegularTools: jest.fn().mockResolvedValue({}),
getBrowserTools: jest.fn().mockResolvedValue({}),
getRegularTools: jest.fn().mockResolvedValue(regularTools),
getBrowserTools: jest.fn().mockResolvedValue(browserTools),
disconnect: jest.fn().mockResolvedValue(undefined),
};
}
describe('createInstanceAgent', () => {
beforeEach(() => {
Agent.mockClear();
ToolSearchProcessor.mockClear();
createToolsFromLocalMcpServer.mockReset();
createToolsFromLocalMcpServer.mockReturnValue({});
});
it('creates a fresh deferred tool processor for each run-scoped toolset', async () => {
const memoryConfig = {
storage: { id: 'memory-store' },
@ -111,7 +125,6 @@ describe('createInstanceAgent', () => {
});
it('does not attach a workspace to the orchestrator Agent', async () => {
Agent.mockClear();
const memoryConfig = { storage: { id: 'memory-store' } } as never;
const fakeWorkspace = { id: 'should-be-ignored' } as never;
@ -140,4 +153,48 @@ describe('createInstanceAgent', () => {
expect(firstCall).toBeDefined();
expect(firstCall[0]).not.toHaveProperty('workspace');
});
it('prefers local gateway tools over external MCP tools when names collide', async () => {
const memoryConfig = { storage: { id: 'memory-store' } } as never;
const localMcpServer = {
getToolsByCategory: jest.fn().mockReturnValue([]),
};
const localTools = {
shared_tool: { id: 'local-shared' },
} as unknown as ToolsInput;
const externalTools = {
shared_tool: { id: 'external-shared' },
github_workflows: { id: 'github-workflows' },
custom_plan: { id: 'custom-plan' },
} as unknown as ToolsInput;
const orchestrationContext: Record<string, unknown> = {
runId: 'local-priority',
browserMcpConfig: undefined,
};
createToolsFromLocalMcpServer.mockReturnValue(localTools);
await createInstanceAgent({
modelId: 'test-model',
context: {
runLabel: 'local-priority',
localGatewayStatus: undefined,
licenseHints: undefined,
localMcpServer,
},
orchestrationContext,
memoryConfig,
mcpManager: createMcpManagerStub(externalTools),
disableDeferredTools: true,
} as never);
const calls = Agent.mock.calls as Array<[Record<string, { tools?: ToolsInput }>]>;
const agentTools = calls[0]?.[0].tools as Record<string, { id: string }>;
const mcpContextTools = orchestrationContext.mcpTools as Record<string, { id: string }>;
expect(agentTools.shared_tool).toMatchObject({ id: 'local-shared' });
expect(agentTools.github_workflows).toMatchObject({ id: 'github-workflows' });
expect(agentTools.custom_plan).toMatchObject({ id: 'custom-plan' });
expect(mcpContextTools.shared_tool).toMatchObject({ id: 'local-shared' });
expect(mcpContextTools.github_workflows).toMatchObject({ id: 'github-workflows' });
});
});

View File

@ -0,0 +1,40 @@
import type { ToolsInput } from '@mastra/core/agent';
import { addSafeMcpTools, createClaimedToolNames } from '../mcp-tool-name-validation';
/** Build a ToolsInput stub in which each named tool carries its own name as `id`. */
function makeTools(names: string[]): ToolsInput {
	const entries = names.map((toolName) => [toolName, { id: toolName }] as const);
	return Object.fromEntries(entries) as unknown as ToolsInput;
}
describe('MCP tool name validation', () => {
	it('allows external tool names that contain native tool names as suffixes', () => {
		const target: ToolsInput = {};
		// "github_workflows" / "custom_plan" merely contain the claimed names
		// "workflows" / "plan" as substrings — that must NOT count as a collision.
		addSafeMcpTools(target, makeTools(['github_workflows', 'custom_plan']), {
			source: 'external MCP',
			claimedToolNames: createClaimedToolNames(['workflows', 'plan']),
		});
		expect(target.github_workflows).toBeDefined();
		expect(target.custom_plan).toBeDefined();
	});

	it('still skips exact normalized name collisions with native tools', () => {
		const target: ToolsInput = {};
		const warn = jest.fn();
		// "work-flows" normalizes to "workflows", exactly matching the claimed
		// native name, so the external tool must be dropped and reported via warn.
		addSafeMcpTools(target, makeTools(['work-flows']), {
			source: 'external MCP',
			claimedToolNames: createClaimedToolNames(['workflows']),
			warn,
		});
		expect(target['work-flows']).toBeUndefined();
		expect(warn).toHaveBeenCalledWith(
			expect.objectContaining({
				source: 'external MCP',
				toolName: 'work-flows',
			}),
		);
	});
});

View File

@ -1,7 +1,11 @@
import type { ToolsInput } from '@mastra/core/agent';
import { z } from 'zod';
import { sanitizeMcpToolSchemas, sanitizeZodType } from '../sanitize-mcp-schemas';
import {
McpSchemaSanitizationError,
sanitizeMcpToolSchemas,
sanitizeZodType,
} from '../sanitize-mcp-schemas';
function makeTools(
schemas: Record<string, { input?: z.ZodTypeAny; output?: z.ZodTypeAny }>,
@ -17,6 +21,22 @@ function makeTools(
}
describe('sanitizeMcpToolSchemas', () => {
function makeDeepObject(depth: number): z.ZodTypeAny {
let schema: z.ZodTypeAny = z.string();
for (let i = 0; i < depth; i++) {
schema = z.object({ child: schema });
}
return schema;
}
function makeWideObject(width: number): z.ZodTypeAny {
const shape: z.ZodRawShape = {};
for (let i = 0; i < width; i++) {
shape[`field${i}`] = z.string();
}
return z.object(shape);
}
it('should return empty tools input unchanged', () => {
const tools = {} as ToolsInput;
@ -270,6 +290,170 @@ describe('sanitizeMcpToolSchemas', () => {
});
});
describe('depth bounding', () => {
it('should throw a typed error when a schema exceeds the maximum depth', () => {
expect(() => sanitizeZodType(makeDeepObject(4), false, { maxDepth: 2 })).toThrow(
McpSchemaSanitizationError,
);
});
it('should remove only the offending MCP tool when one schema is too deep', () => {
const onError = jest.fn();
const tools = makeTools({
validTool: { input: z.object({ name: z.string() }) },
deepTool: { input: makeDeepObject(4) },
});
const result = sanitizeMcpToolSchemas(tools, { maxDepth: 2, onError });
expect(Object.keys(result)).toEqual(['validTool']);
expect(onError).toHaveBeenCalledWith(expect.any(McpSchemaSanitizationError));
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls[0]?.[0].details.toolName).toBe('deepTool');
expect(onErrorCalls[0]?.[0].details.maxDepth).toBe(2);
});
it('should bound arrays, records, and unions', () => {
const tools = makeTools({
arrayTool: { input: z.array(makeDeepObject(3)) },
recordTool: { input: z.record(makeDeepObject(3)) },
unionTool: { input: z.union([makeDeepObject(3), z.null()]) },
});
const result = sanitizeMcpToolSchemas(tools, { maxDepth: 2 });
expect(Object.keys(result)).toEqual([]);
});
it('should bound lazy schemas', () => {
const onError = jest.fn();
const tools = makeTools({
lazyTool: { input: z.object({ payload: z.lazy(() => makeWideObject(4)) }) },
});
const result = sanitizeMcpToolSchemas(tools, {
maxObjectProperties: 2,
onError,
});
expect(Object.keys(result)).toEqual([]);
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls[0]?.[0].details.toolName).toBe('lazyTool');
expect(onErrorCalls[0]?.[0].details.limitType).toBe('objectProperties');
});
it('should remove tools containing unsupported tuple or intersection schemas', () => {
const onError = jest.fn();
const tools = makeTools({
tupleTool: { input: z.object({ pair: z.tuple([z.string(), z.null()]) }) },
intersectionTool: {
input: z.object({
payload: z.intersection(z.object({ name: z.string() }), z.object({ id: z.string() })),
}),
},
});
const result = sanitizeMcpToolSchemas(tools, { onError });
expect(Object.keys(result)).toEqual([]);
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls.map(([error]) => error.details)).toEqual(
expect.arrayContaining([
expect.objectContaining({
toolName: 'tupleTool',
limitType: 'unsupportedType',
zodType: 'ZodTuple',
}),
expect.objectContaining({
toolName: 'intersectionTool',
limitType: 'unsupportedType',
zodType: 'ZodIntersection',
}),
]),
);
});
it('should remove tools containing unsupported wrapper types', () => {
const onError = jest.fn();
const tools = makeTools({
mapTool: { input: z.object({ values: z.map(z.string(), z.string()) }) },
});
const result = sanitizeMcpToolSchemas(tools, { onError });
expect(Object.keys(result)).toEqual([]);
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls[0]?.[0].details.toolName).toBe('mapTool');
expect(onErrorCalls[0]?.[0].details.limitType).toBe('unsupportedType');
expect(onErrorCalls[0]?.[0].details.zodType).toBe('ZodMap');
});
it('should remove a shallow MCP tool with too many object properties', () => {
const onError = jest.fn();
const tools = makeTools({
wideTool: { input: makeWideObject(4) },
});
const result = sanitizeMcpToolSchemas(tools, {
maxObjectProperties: 2,
onError,
});
expect(Object.keys(result)).toEqual([]);
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls[0]?.[0].details.toolName).toBe('wideTool');
expect(onErrorCalls[0]?.[0].details.limitType).toBe('objectProperties');
expect(onErrorCalls[0]?.[0].details.limit).toBe(2);
expect(onErrorCalls[0]?.[0].details.count).toBe(4);
});
it('should remove a shallow MCP tool with too many union options', () => {
const onError = jest.fn();
const tools = makeTools({
unionTool: {
input: z.object({
value: z.union([z.literal('a'), z.literal('b'), z.literal('c')]),
}),
},
});
const result = sanitizeMcpToolSchemas(tools, {
maxUnionOptions: 2,
onError,
});
expect(Object.keys(result)).toEqual([]);
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls[0]?.[0].details.toolName).toBe('unionTool');
expect(onErrorCalls[0]?.[0].details.limitType).toBe('unionOptions');
expect(onErrorCalls[0]?.[0].details.limit).toBe(2);
expect(onErrorCalls[0]?.[0].details.count).toBe(3);
});
it('should remove an MCP tool that exceeds the total schema node budget', () => {
const onError = jest.fn();
const tools = makeTools({
nodeBudgetTool: {
input: z.object({
first: z.string(),
second: z.string(),
}),
},
});
const result = sanitizeMcpToolSchemas(tools, {
maxNodes: 2,
onError,
});
expect(Object.keys(result)).toEqual([]);
const onErrorCalls = onError.mock.calls as Array<[McpSchemaSanitizationError]>;
expect(onErrorCalls[0]?.[0].details.toolName).toBe('nodeBudgetTool');
expect(onErrorCalls[0]?.[0].details.limitType).toBe('nodes');
expect(onErrorCalls[0]?.[0].details.limit).toBe(2);
});
});
describe('strict mode', () => {
it('should throw on conflicting field descriptions in discriminated unions', () => {
const union = z.discriminatedUnion('action', [

View File

@ -10,6 +10,11 @@ import { getSystemPrompt } from './system-prompt';
import { createToolsFromLocalMcpServer } from '../tools/filesystem/create-tools-from-mcp-server';
import { buildAgentTraceInputs, mergeTraceRunInputs } from '../tracing/langsmith-tracing';
import type { CreateInstanceAgentOptions } from '../types';
import {
addSafeMcpTools,
createClaimedToolNames,
type McpToolNameValidationError,
} from './mcp-tool-name-validation';
let cachedMastra: Mastra | null = null;
let cachedMastraStorageKey = '';
@ -60,8 +65,14 @@ export async function createInstanceAgent(options: CreateInstanceAgentOptions):
// Load MCP tools (cached by config-hash inside the manager — only spawns
// processes / opens connections on first call or config change).
const mcpTools = await mcpManager.getRegularTools(mcpServers);
const browserMcpTools = await mcpManager.getBrowserTools(orchestrationContext?.browserMcpConfig);
const mcpTools = await mcpManager.getRegularTools(mcpServers, context.logger);
const browserMcpTools = await mcpManager.getBrowserTools(
orchestrationContext?.browserMcpConfig,
context.logger,
);
const rawLocalMcpTools = context.localMcpServer
? createToolsFromLocalMcpServer(context.localMcpServer, context.logger)
: {};
// Browser tool names — used to exclude them from the orchestrator's direct toolset.
// Browser tools are only accessible via browser-credential-setup (sub-agent) to prevent
@ -71,54 +82,73 @@ export async function createInstanceAgent(options: CreateInstanceAgentOptions):
...(context.localMcpServer?.getToolsByCategory('browser').map((t) => t.name) ?? []),
]);
// Store ALL MCP tools (external + browser) on orchestrationContext for sub-agents
// (browser-credential-setup, delegate). NOT given to the orchestrator directly.
// Store ALL MCP tools (external + browser + local gateway) on orchestrationContext for
// sub-agents (browser-credential-setup, delegate). NOT given to the orchestrator directly.
const allMcpTools: ToolsInput = {};
const domainToolNames = new Set(Object.keys(domainTools));
for (const [name, tool] of Object.entries({ ...mcpTools, ...browserMcpTools })) {
if (!domainToolNames.has(name)) {
allMcpTools[name] = tool;
}
}
if (orchestrationContext && Object.keys(allMcpTools).length > 0) {
orchestrationContext.mcpTools = allMcpTools;
}
const warnSkippedMcpTool = (error: McpToolNameValidationError) => {
context.logger?.warn('Skipped MCP tool with unsafe name', {
toolName: error.toolName,
source: error.source,
reason: error.message,
});
};
// Build orchestration tools (plan, delegate) — orchestrator-only
// Must happen after mcpTools are set on orchestrationContext
const orchestrationTools = orchestrationContext
? createOrchestrationTools(orchestrationContext)
: {};
// Prevent MCP tools from shadowing domain or orchestration tools.
// A malicious/misconfigured MCP server could register a tool named "run-workflow"
// which would silently replace the real domain tool via object spread.
// Keep MCP tools from shadowing domain or orchestration tools during object composition.
const reservedToolNames = new Set([
...Object.keys(domainTools),
...Object.keys(orchestrationTools),
]);
const safeMcpTools: ToolsInput = {};
for (const [name, tool] of Object.entries(mcpTools)) {
if (reservedToolNames.has(name)) continue;
safeMcpTools[name] = tool;
const mcpContextToolNames = createClaimedToolNames(reservedToolNames);
addSafeMcpTools(allMcpTools, rawLocalMcpTools, {
source: 'local gateway MCP',
claimedToolNames: mcpContextToolNames,
warn: warnSkippedMcpTool,
});
addSafeMcpTools(allMcpTools, mcpTools, {
source: 'external MCP',
claimedToolNames: mcpContextToolNames,
warn: warnSkippedMcpTool,
});
addSafeMcpTools(allMcpTools, browserMcpTools, {
source: 'browser MCP',
claimedToolNames: mcpContextToolNames,
warn: warnSkippedMcpTool,
});
const orchestratorLocalMcpTools = Object.fromEntries(
Object.entries(rawLocalMcpTools).filter(([name]) => !browserToolNames.has(name)),
);
if (orchestrationContext && Object.keys(allMcpTools).length > 0) {
orchestrationContext.mcpTools = allMcpTools;
}
const claimedOrchestratorToolNames = createClaimedToolNames(reservedToolNames);
const safeLocalMcpTools: ToolsInput = {};
addSafeMcpTools(safeLocalMcpTools, orchestratorLocalMcpTools, {
source: 'local gateway MCP',
claimedToolNames: claimedOrchestratorToolNames,
warn: warnSkippedMcpTool,
});
const safeMcpTools: ToolsInput = {};
addSafeMcpTools(safeMcpTools, mcpTools, {
source: 'external MCP',
claimedToolNames: claimedOrchestratorToolNames,
warn: warnSkippedMcpTool,
});
// ── Tool search: split tools into always-loaded core vs deferred ────────
// Anthropic guidance: "Keep your 3-5 most-used tools always loaded, defer the rest."
// Tool selection accuracy degrades past 10+ tools; tool search improves it significantly.
const localMcpTools = context.localMcpServer
? Object.fromEntries(
Object.entries(createToolsFromLocalMcpServer(context.localMcpServer)).filter(
([name]) => !browserToolNames.has(name),
),
)
: {};
const allOrchestratorTools: ToolsInput = {
...orchestratorDomainTools,
...orchestrationTools,
...safeLocalMcpTools, // gateway tools — browser tools excluded via browserToolNames
...safeMcpTools, // external MCP only — browser tools excluded
...localMcpTools, // gateway tools — browser tools excluded via browserToolNames
};
const tracedOrchestratorTools =
orchestrationContext?.tracing?.wrapTools(allOrchestratorTools, {

View File

@ -0,0 +1,82 @@
import type { ToolsInput } from '@mastra/core/agent';
import { isSafeObjectKey } from '@n8n/api-types';
/**
 * Raised when an MCP-provided tool name is rejected: either it fails the
 * identifier-safety checks or it collides with an already-claimed tool name.
 */
export class McpToolNameValidationError extends Error {
	override name = 'McpToolNameValidationError';

	constructor(
		message: string,
		readonly toolName: string,
		readonly source: string,
	) {
		super(message);
	}
}
const MCP_TOOL_NAME_PATTERN = /^[A-Za-z][A-Za-z0-9_-]{0,63}$/;

/**
 * Returns true when an MCP tool name is safe to use as an identifier/object
 * key: stable under NFKC normalization, matching the conservative tool-name
 * pattern, and (lowercased) passing the shared object-key safety check.
 */
export function isSafeMcpIdentifierName(name: string): boolean {
	// Names that change under NFKC normalization can smuggle look-alike
	// characters past later comparisons — reject them outright.
	if (name.normalize('NFKC') !== name) return false;
	if (!MCP_TOOL_NAME_PATTERN.test(name)) return false;
	return isSafeObjectKey(name.toLowerCase());
}
/**
 * Collapses a tool name to its canonical comparison key: NFKC-normalized,
 * lowercased, with every non-alphanumeric character stripped.
 */
export function normalizeMcpToolName(name: string): string {
	const folded = name.normalize('NFKC').toLowerCase();
	return folded.replace(/[^a-z0-9]/g, '');
}
/**
 * Validates an MCP tool name and returns its normalized comparison key.
 *
 * @throws McpToolNameValidationError when the name fails the safety checks.
 */
export function validateMcpToolName(name: string, source: string): string {
	if (isSafeMcpIdentifierName(name)) {
		return normalizeMcpToolName(name);
	}
	throw new McpToolNameValidationError(
		`MCP tool "${name}" from ${source} has an invalid name`,
		name,
		source,
	);
}
/**
 * Builds a map from normalized tool-name keys to the original names that
 * claimed them, used to detect collisions when merging MCP toolsets.
 * A later duplicate of the same normalized key overwrites the earlier entry.
 */
export function createClaimedToolNames(names: Iterable<string>): Map<string, string> {
	const entries: Array<[string, string]> = [];
	for (const original of names) {
		entries.push([normalizeMcpToolName(original), original]);
	}
	return new Map(entries);
}
/**
 * Copies tools from `sourceTools` into `target`, skipping (and optionally
 * reporting via `options.warn`) any tool whose name is unsafe or whose
 * normalized name is already claimed in `options.claimedToolNames`.
 * Accepted tools claim their normalized name so later sources cannot
 * shadow them.
 */
export function addSafeMcpTools(
	target: ToolsInput,
	sourceTools: ToolsInput,
	options: {
		source: string;
		claimedToolNames: Map<string, string>;
		warn?: (error: McpToolNameValidationError) => void;
	},
): void {
	const { source, claimedToolNames, warn } = options;
	for (const [name, tool] of Object.entries(sourceTools)) {
		try {
			const normalizedName = validateMcpToolName(name, source);
			const claimedBy = claimedToolNames.get(normalizedName);
			if (claimedBy) {
				throw new McpToolNameValidationError(
					`MCP tool "${name}" from ${source} conflicts with "${claimedBy}"`,
					name,
					source,
				);
			}
			claimedToolNames.set(normalizedName, name);
			target[name] = tool;
		} catch (error) {
			// Only swallow our own validation errors; anything else is a real bug.
			if (!(error instanceof McpToolNameValidationError)) throw error;
			warn?.(error);
		}
	}
}

View File

@ -15,6 +15,194 @@
import type { ToolsInput } from '@mastra/core/agent';
import { z } from 'zod';
/** Maximum nesting depth allowed in a sanitized MCP tool schema. */
export const MCP_SCHEMA_MAX_DEPTH = 32;
/** Maximum total number of schema nodes visited per tool schema. */
export const MCP_SCHEMA_MAX_NODES = 1_000;
/** Maximum number of properties allowed on a single object schema. */
export const MCP_SCHEMA_MAX_OBJECT_PROPERTIES = 250;
/** Maximum number of options allowed in a union (anyOf/oneOf/allOf). */
export const MCP_SCHEMA_MAX_UNION_OPTIONS = 100;
type McpSchemaLimitType =
	| 'depth'
	| 'nodes'
	| 'objectProperties'
	| 'unionOptions'
	| 'unsupportedType';

/**
 * Raised when an MCP tool schema violates one of the sanitization limits
 * (depth, node budget, property count, union size, or an unsupported type).
 * `details` pinpoints the offending tool, path, and which limit was hit.
 */
export class McpSchemaSanitizationError extends Error {
	override name = 'McpSchemaSanitizationError';

	constructor(
		message: string,
		readonly details: {
			toolName?: string;
			path: string;
			depth: number;
			maxDepth: number;
			limit?: number;
			limitType?: McpSchemaLimitType;
			count?: number;
			zodType?: string;
		},
	) {
		super(message);
	}
}
/** Mutable counter shared across one sanitization pass. */
interface SanitizeBudget {
	// Total number of schema nodes visited so far in this pass.
	nodes: number;
}

/** Internal state threaded through the recursive Zod-schema sanitizer. */
interface SanitizeContext {
	strict: boolean;
	toolName?: string;
	// Human-readable location of the current node, e.g. "$.payload.child".
	path: string;
	depth: number;
	maxDepth: number;
	maxNodes: number;
	maxObjectProperties: number;
	maxUnionOptions: number;
	// Shared by reference so sibling subtrees draw from one node budget.
	budget: SanitizeBudget;
}

/** Caller-facing options for sanitizeZodType; unset limits fall back to the module defaults. */
interface SanitizeZodTypeOptions {
	maxDepth?: number;
	maxNodes?: number;
	maxObjectProperties?: number;
	maxUnionOptions?: number;
	toolName?: string;
	path?: string;
	budget?: SanitizeBudget;
}

/** Caller-facing options for assertMcpJsonSchemaWithinLimits. */
interface ValidateJsonSchemaOptions {
	maxDepth?: number;
	maxNodes?: number;
	maxObjectProperties?: number;
	maxUnionOptions?: number;
	toolName?: string;
}

/** Internal state for the raw JSON-schema validation walk. */
interface JsonSchemaValidationContext {
	toolName?: string;
	maxDepth: number;
	maxNodes: number;
	maxObjectProperties: number;
	maxUnionOptions: number;
	budget: SanitizeBudget;
}
/** Narrow an unknown value to a plain (non-array, non-null) object. */
function isRecord(value: unknown): value is Record<string, unknown> {
	if (value === null || Array.isArray(value)) return false;
	return typeof value === 'object';
}
/**
 * Raises a typed McpSchemaSanitizationError describing which limit was
 * exceeded, where in the schema, and at what depth.
 */
function throwJsonSchemaLimitError(
	context: JsonSchemaValidationContext,
	path: string,
	depth: number,
	message: string,
	limitType: McpSchemaLimitType,
	limit: number,
	count?: number,
): never {
	const details = {
		toolName: context.toolName,
		path,
		depth,
		maxDepth: context.maxDepth,
		limit,
		limitType,
		count,
	};
	throw new McpSchemaSanitizationError(message, details);
}
/**
 * Recursively validates a raw JSON-schema value against the configured
 * limits (depth, total node budget, object property count, union size).
 * Throws McpSchemaSanitizationError (via throwJsonSchemaLimitError) on the
 * first violation; returns normally when the subtree is within limits.
 */
function validateJsonSchemaNode(
	value: unknown,
	path: string,
	depth: number,
	context: JsonSchemaValidationContext,
): void {
	if (depth > context.maxDepth) {
		throwJsonSchemaLimitError(
			context,
			path,
			depth,
			`MCP schema exceeds maximum depth of ${context.maxDepth}`,
			'depth',
			context.maxDepth,
			depth,
		);
	}
	// Every visited node — object, array element, or primitive — consumes
	// one unit of the shared node budget.
	context.budget.nodes++;
	if (context.budget.nodes > context.maxNodes) {
		throwJsonSchemaLimitError(
			context,
			path,
			depth,
			`MCP schema exceeds maximum node count of ${context.maxNodes}`,
			'nodes',
			context.maxNodes,
			context.budget.nodes,
		);
	}
	if (Array.isArray(value)) {
		for (const [index, item] of value.entries()) {
			validateJsonSchemaNode(item, `${path}[${index}]`, depth + 1, context);
		}
		return;
	}
	if (!isRecord(value)) return;
	// "properties" gets a dedicated fan-out check here; the generic recursion
	// at the bottom still descends into it as a regular child.
	const properties = value.properties;
	if (isRecord(properties)) {
		const propertyCount = Object.keys(properties).length;
		if (propertyCount > context.maxObjectProperties) {
			throwJsonSchemaLimitError(
				context,
				`${path}.properties`,
				depth + 1,
				`MCP schema object exceeds maximum property count of ${context.maxObjectProperties}`,
				'objectProperties',
				context.maxObjectProperties,
				propertyCount,
			);
		}
	}
	// Union-style keyword arrays are bounded by option count.
	for (const unionKey of ['anyOf', 'oneOf', 'allOf']) {
		const unionOptions = value[unionKey];
		if (Array.isArray(unionOptions) && unionOptions.length > context.maxUnionOptions) {
			throwJsonSchemaLimitError(
				context,
				`${path}.${unionKey}`,
				depth + 1,
				`MCP schema union exceeds maximum option count of ${context.maxUnionOptions}`,
				'unionOptions',
				context.maxUnionOptions,
				unionOptions.length,
			);
		}
	}
	// Recurse into every property of the object, including schema keywords.
	for (const [key, child] of Object.entries(value)) {
		validateJsonSchemaNode(child, `${path}.${key}`, depth + 1, context);
	}
}
export function assertMcpJsonSchemaWithinLimits(
schema: unknown,
options: ValidateJsonSchemaOptions = {},
): void {
validateJsonSchemaNode(schema, '$.inputSchema', 0, {
toolName: options.toolName,
maxDepth: options.maxDepth ?? MCP_SCHEMA_MAX_DEPTH,
maxNodes: options.maxNodes ?? MCP_SCHEMA_MAX_NODES,
maxObjectProperties: options.maxObjectProperties ?? MCP_SCHEMA_MAX_OBJECT_PROPERTIES,
maxUnionOptions: options.maxUnionOptions ?? MCP_SCHEMA_MAX_UNION_OPTIONS,
budget: { nodes: 0 },
});
}
/**
* Recursively walk a Zod schema tree and replace Anthropic-incompatible types.
*
@ -23,7 +211,101 @@ import { z } from 'zod';
* mismatched descriptions at construction time rather than silently degrading
* the schema the model sees.
*/
export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodTypeAny {
export function sanitizeZodType(
schema: z.ZodTypeAny,
strict = false,
options: SanitizeZodTypeOptions = {},
): z.ZodTypeAny {
return sanitizeZodTypeInner(schema, {
strict,
toolName: options.toolName,
path: options.path ?? '$',
depth: 0,
maxDepth: options.maxDepth ?? MCP_SCHEMA_MAX_DEPTH,
maxNodes: options.maxNodes ?? MCP_SCHEMA_MAX_NODES,
maxObjectProperties: options.maxObjectProperties ?? MCP_SCHEMA_MAX_OBJECT_PROPERTIES,
maxUnionOptions: options.maxUnionOptions ?? MCP_SCHEMA_MAX_UNION_OPTIONS,
budget: options.budget ?? { nodes: 0 },
});
}
/**
 * Build (without throwing) an McpSchemaSanitizationError for a limit
 * violation found while sanitizing a Zod schema; location details come
 * from the active SanitizeContext.
 */
function createLimitError(
	context: SanitizeContext,
	message: string,
	limitType: McpSchemaLimitType,
	limit: number,
	count?: number,
): McpSchemaSanitizationError {
	const { toolName, path, depth, maxDepth } = context;
	return new McpSchemaSanitizationError(message, {
		toolName,
		path,
		depth,
		maxDepth,
		limit,
		limitType,
		count,
	});
}
/**
 * Build an McpSchemaSanitizationError for a Zod type the sanitizer cannot
 * express as a provider-safe JSON schema. Prefers the Zod internal typeName
 * for the error label, falling back to the constructor name.
 */
function createUnsupportedTypeError(
	context: SanitizeContext,
	schema: z.ZodTypeAny,
): McpSchemaSanitizationError {
	const { typeName } = schema._def as { typeName?: unknown };
	const zodType = typeof typeName === 'string' ? typeName : schema.constructor.name;
	return new McpSchemaSanitizationError(`MCP schema contains unsupported Zod type ${zodType}`, {
		toolName: context.toolName,
		path: context.path,
		depth: context.depth,
		maxDepth: context.maxDepth,
		limitType: 'unsupportedType',
		zodType,
	});
}
/**
 * Whether `schema` is one of the leaf Zod types the sanitizer passes through
 * unchanged (strings, numbers, booleans, dates, any/unknown, literals, enums).
 */
function isSupportedLeafSchema(schema: z.ZodTypeAny): boolean {
	const leafConstructors = [
		z.ZodString,
		z.ZodNumber,
		z.ZodBoolean,
		z.ZodDate,
		z.ZodAny,
		z.ZodUnknown,
		z.ZodLiteral,
		z.ZodEnum,
		z.ZodNativeEnum,
	];
	return leafConstructors.some((ctor) => schema instanceof ctor);
}
function sanitizeZodTypeInner(schema: z.ZodTypeAny, context: SanitizeContext): z.ZodTypeAny {
if (context.depth > context.maxDepth) {
throw createLimitError(
context,
`MCP schema exceeds maximum depth of ${context.maxDepth}`,
'depth',
context.maxDepth,
context.depth,
);
}
context.budget.nodes++;
if (context.budget.nodes > context.maxNodes) {
throw createLimitError(
context,
`MCP schema exceeds maximum node count of ${context.maxNodes}`,
'nodes',
context.maxNodes,
context.budget.nodes,
);
}
const sanitizeChild = (child: z.ZodTypeAny, path: string): z.ZodTypeAny =>
sanitizeZodTypeInner(child, {
...context,
path,
depth: context.depth + 1,
});
// ZodNull → replace with optional undefined (shouldn't appear standalone, but handle it)
if (schema instanceof z.ZodNull) {
return z.string().optional();
@ -31,7 +313,10 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
// ZodNullable<T> → T.optional()
if (schema instanceof z.ZodNullable) {
return sanitizeZodType((schema as z.ZodNullable<z.ZodTypeAny>).unwrap(), strict).optional();
return sanitizeChild(
(schema as z.ZodNullable<z.ZodTypeAny>).unwrap(),
`${context.path}?`,
).optional();
}
// ZodDiscriminatedUnion — flatten to a single z.object
@ -42,6 +327,15 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
const disc = schema as z.ZodDiscriminatedUnion<string, Array<z.ZodObject<z.ZodRawShape>>>;
const discriminator = disc.discriminator;
const variants = [...disc.options.values()] as Array<z.ZodObject<z.ZodRawShape>>;
if (variants.length > context.maxUnionOptions) {
throw createLimitError(
context,
`MCP schema discriminated union exceeds maximum option count of ${context.maxUnionOptions}`,
'unionOptions',
context.maxUnionOptions,
variants.length,
);
}
// Phase 1: Collect metadata from all variants
const actionMeta: Array<{ value: string; description?: string }> = [];
@ -70,6 +364,16 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
});
}
}
const mergedPropertyCount = fieldMeta.size + (actionMeta.length > 0 ? 1 : 0);
if (mergedPropertyCount > context.maxObjectProperties) {
throw createLimitError(
context,
`MCP schema object exceeds maximum property count of ${context.maxObjectProperties}`,
'objectProperties',
context.maxObjectProperties,
mergedPropertyCount,
);
}
// Phase 2: Build the merged shape
const mergedShape: z.ZodRawShape = {};
@ -87,12 +391,15 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
// Build each field with properly merged descriptions
for (const [fieldName, entries] of fieldMeta) {
const sanitizedField = sanitizeZodType(entries[0].type, strict).optional();
const sanitizedField = sanitizeChild(
entries[0].type,
`${context.path}.${fieldName}`,
).optional();
// Detect enum value conflicts across variants.
// Only the first variant's type is used (entries[0].type), so differing
// enum values in other variants would be silently lost.
if (strict && entries.length > 1) {
if (context.strict && entries.length > 1) {
const unwrapOptional = (t: z.ZodTypeAny): z.ZodTypeAny =>
t instanceof z.ZodOptional ? unwrapOptional(t.unwrap() as z.ZodTypeAny) : t;
@ -128,7 +435,7 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
const uniqueDescs = new Set(withDesc.map((d) => d.description));
if (uniqueDescs.size > 1) {
if (strict) {
if (context.strict) {
const conflictDetails = withDesc
.map((d) => ` Action "${d.action}": "${d.description}"`)
.join('\n');
@ -161,9 +468,20 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
if (schema instanceof z.ZodUnion) {
const options = (schema as z.ZodUnion<[z.ZodTypeAny, ...z.ZodTypeAny[]]>)
.options as z.ZodTypeAny[];
if (options.length > context.maxUnionOptions) {
throw createLimitError(
context,
`MCP schema union exceeds maximum option count of ${context.maxUnionOptions}`,
'unionOptions',
context.maxUnionOptions,
options.length,
);
}
const nonNull = options.filter((o) => !(o instanceof z.ZodNull));
const hadNull = nonNull.length < options.length;
const sanitized = nonNull.map((o) => sanitizeZodType(o, strict));
const sanitized = nonNull.map((o, index) =>
sanitizeChild(o, `${context.path}.union[${index}]`),
);
if (sanitized.length === 0) {
// All options were null — degenerate case
@ -179,27 +497,47 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
// ZodObject — recurse into shape
if (schema instanceof z.ZodObject) {
const shape = (schema as z.ZodObject<z.ZodRawShape>).shape;
const entries = Object.entries(shape);
if (entries.length > context.maxObjectProperties) {
throw createLimitError(
context,
`MCP schema object exceeds maximum property count of ${context.maxObjectProperties}`,
'objectProperties',
context.maxObjectProperties,
entries.length,
);
}
const newShape: z.ZodRawShape = {};
for (const [key, value] of Object.entries(shape)) {
newShape[key] = sanitizeZodType(value, strict);
for (const [key, value] of entries) {
newShape[key] = sanitizeChild(value, `${context.path}.${key}`);
}
return z.object(newShape);
}
// ZodLazy - resolve during sanitization so limits and null-stripping still apply
if (schema instanceof z.ZodLazy) {
return sanitizeChild((schema as z.ZodLazy<z.ZodTypeAny>).schema, `${context.path}.lazy`);
}
// ZodOptional — recurse into inner
if (schema instanceof z.ZodOptional) {
return sanitizeZodType((schema as z.ZodOptional<z.ZodTypeAny>).unwrap(), strict).optional();
return sanitizeChild(
(schema as z.ZodOptional<z.ZodTypeAny>).unwrap(),
`${context.path}?`,
).optional();
}
// ZodArray — recurse into element
if (schema instanceof z.ZodArray) {
return z.array(sanitizeZodType((schema as z.ZodArray<z.ZodTypeAny>).element, strict));
return z.array(
sanitizeChild((schema as z.ZodArray<z.ZodTypeAny>).element, `${context.path}[]`),
);
}
// ZodDefault — recurse into inner
if (schema instanceof z.ZodDefault) {
const inner = (schema as z.ZodDefault<z.ZodTypeAny>)._def.innerType;
return sanitizeZodType(inner, strict).default(
return sanitizeChild(inner, `${context.path}.default`).default(
(schema as z.ZodDefault<z.ZodTypeAny>)._def.defaultValue(),
);
}
@ -207,12 +545,74 @@ export function sanitizeZodType(schema: z.ZodTypeAny, strict = false): z.ZodType
// ZodRecord — recurse into value type
if (schema instanceof z.ZodRecord) {
return z.record(
sanitizeZodType((schema as z.ZodRecord<z.ZodString, z.ZodTypeAny>).valueSchema, strict),
sanitizeChild(
(schema as z.ZodRecord<z.ZodString, z.ZodTypeAny>).valueSchema,
`${context.path}.*`,
),
);
}
// Leaf types (string, number, boolean, enum, literal, etc.) — pass through
return schema;
// ZodEffects - recurse into the source type. Effects are runtime behavior,
// but the provider only needs a safe JSON-compatible schema.
if (schema instanceof z.ZodEffects) {
return sanitizeChild(
(schema as z.ZodEffects<z.ZodTypeAny>).innerType(),
`${context.path}.effect`,
);
}
// ZodPipeline - recurse into both schemas so nested unsupported types cannot hide
if (schema instanceof z.ZodPipeline) {
const pipeline = schema as z.ZodPipeline<z.ZodTypeAny, z.ZodTypeAny>;
return z.pipeline(
sanitizeChild(pipeline._def.in, `${context.path}.pipeline.in`),
sanitizeChild(pipeline._def.out, `${context.path}.pipeline.out`),
);
}
// ZodReadonly / ZodBranded / ZodCatch - recurse into the inner type. The wrappers
// do not add useful provider-schema information, so preserving the safe inner
// schema is preferable to letting nested unsupported types slip through.
if (schema instanceof z.ZodReadonly) {
return sanitizeChild(
(schema as z.ZodReadonly<z.ZodTypeAny>).unwrap(),
`${context.path}.readonly`,
);
}
if (schema instanceof z.ZodBranded) {
return sanitizeChild(
(schema as z.ZodBranded<z.ZodTypeAny, string>).unwrap(),
`${context.path}.brand`,
);
}
if (schema instanceof z.ZodCatch) {
return sanitizeChild(
(schema as z.ZodCatch<z.ZodTypeAny>).removeCatch(),
`${context.path}.catch`,
);
}
if (
schema instanceof z.ZodMap ||
schema instanceof z.ZodSet ||
schema instanceof z.ZodPromise ||
schema instanceof z.ZodFunction ||
schema instanceof z.ZodIntersection ||
schema instanceof z.ZodTuple ||
schema instanceof z.ZodNaN ||
schema instanceof z.ZodBigInt ||
schema instanceof z.ZodUndefined ||
schema instanceof z.ZodNever ||
schema instanceof z.ZodVoid ||
schema instanceof z.ZodSymbol
) {
throw createUnsupportedTypeError(context, schema);
}
// Leaf types (string, number, boolean, enum, literal, etc.) - pass through.
if (isSupportedLeafSchema(schema)) return schema;
throw createUnsupportedTypeError(context, schema);
}
/**
@ -259,14 +659,51 @@ export function sanitizeInputSchema<T extends z.ZodTypeAny>(schema: T): T {
* action context (e.g. 'For "create": ... For "delete": ...') rather than
* throwing.
*/
export function sanitizeMcpToolSchemas(tools: ToolsInput): ToolsInput {
for (const tool of Object.values(tools)) {
export function sanitizeMcpToolSchemas(
tools: ToolsInput,
options: {
maxDepth?: number;
maxNodes?: number;
maxObjectProperties?: number;
maxUnionOptions?: number;
onError?: (error: McpSchemaSanitizationError) => void;
} = {},
): ToolsInput {
for (const [name, tool] of Object.entries(tools)) {
const t = tool as { inputSchema?: z.ZodTypeAny; outputSchema?: z.ZodTypeAny };
if (t.inputSchema) {
t.inputSchema = ensureTopLevelObject(sanitizeZodType(t.inputSchema));
}
if (t.outputSchema) {
t.outputSchema = sanitizeZodType(t.outputSchema);
const budget = { nodes: 0 };
try {
if (t.inputSchema) {
t.inputSchema = ensureTopLevelObject(
sanitizeZodType(t.inputSchema, false, {
maxDepth: options.maxDepth,
maxNodes: options.maxNodes,
maxObjectProperties: options.maxObjectProperties,
maxUnionOptions: options.maxUnionOptions,
toolName: name,
path: '$.inputSchema',
budget,
}),
);
}
if (t.outputSchema) {
t.outputSchema = sanitizeZodType(t.outputSchema, false, {
maxDepth: options.maxDepth,
maxNodes: options.maxNodes,
maxObjectProperties: options.maxObjectProperties,
maxUnionOptions: options.maxUnionOptions,
toolName: name,
path: '$.outputSchema',
budget,
});
}
} catch (error) {
if (error instanceof McpSchemaSanitizationError) {
delete (tools as Record<string, unknown>)[name];
options.onError?.(error);
continue;
}
throw error;
}
}

View File

@ -17,6 +17,29 @@ import { McpClientManager } from '../mcp-client-manager';
const { MCPClient: mockedMcpClient } =
// eslint-disable-next-line @typescript-eslint/no-require-imports
require('@mastra/mcp') as { MCPClient: jest.Mock };
const { sanitizeMcpToolSchemas: mockedSanitizeMcpToolSchemas } =
// eslint-disable-next-line @typescript-eslint/no-require-imports
require('../../agent/sanitize-mcp-schemas') as {
sanitizeMcpToolSchemas: jest.Mock;
};
/** Minimal logger double — only `warn` is exercised by these tests. */
interface LoggerMock {
	warn: jest.Mock;
}
/**
 * Shape of the options bag the mocked sanitizeMcpToolSchemas receives,
 * mirroring the McpSchemaSanitizationError payload its onError callback
 * is invoked with in the real implementation.
 */
interface SanitizeOptions {
	onError?: (error: {
		message: string;
		details: {
			toolName?: string;
			path: string;
			depth: number;
			maxDepth: number;
			limitType?: string;
			limit?: number;
		};
	}) => void;
}
function createValidatorMock(): jest.Mocked<SsrfUrlValidator> {
return {
@ -83,6 +106,93 @@ describe('McpClientManager', () => {
});
});
describe('server and schema filtering', () => {
	// Servers whose names fail the safe-identifier check must never reach the
	// MCPClient constructor, and each skip must be logged with its source.
	it('skips external MCP servers with unsafe names', async () => {
		const logger: LoggerMock = { warn: jest.fn() };
		const manager = new McpClientManager();

		await manager.getRegularTools(
			[
				{ name: 'bad name', url: 'https://bad.example.com/mcp' },
				{ name: 'safe_server', url: 'https://safe.example.com/mcp' },
			],
			logger as never,
		);

		expect(mockedMcpClient).toHaveBeenCalledTimes(1);
		const mcpClientCalls = mockedMcpClient.mock.calls as Array<
			[{ servers: Record<string, unknown> }]
		>;
		const [mcpClientConfig] = mcpClientCalls[0];
		// Only the safely-named server is forwarded to the client config.
		expect(mcpClientConfig.servers).not.toHaveProperty('bad name');
		expect(mcpClientConfig.servers).toHaveProperty('safe_server');
		expect(logger.warn).toHaveBeenCalledWith(
			'Skipped MCP server with unsafe name',
			expect.objectContaining({
				serverName: 'bad name',
				source: 'external MCP',
			}),
		);
	});

	// A single browser MCP config with an unsafe name yields an empty tool map
	// and no client construction at all.
	it('skips browser MCP configs with unsafe names', async () => {
		const logger: LoggerMock = { warn: jest.fn() };
		const manager = new McpClientManager();

		await expect(
			manager.getBrowserTools(
				{ name: 'bad name', url: 'https://browser.example.com/mcp' },
				logger as never,
			),
		).resolves.toEqual({});

		expect(mockedMcpClient).not.toHaveBeenCalled();
		expect(logger.warn).toHaveBeenCalledWith(
			'Skipped MCP server with unsafe name',
			expect.objectContaining({
				serverName: 'bad name',
				source: 'browser MCP',
			}),
		);
	});

	// When the sanitizer drops a tool, the manager's onError hook must surface
	// the sanitization details through logger.warn.
	it('logs tools skipped during schema sanitization', async () => {
		const logger: LoggerMock = { warn: jest.fn() };
		// Simulate the sanitizer rejecting one tool for exceeding max depth.
		mockedSanitizeMcpToolSchemas.mockImplementationOnce(
			(_tools: Record<string, unknown>, options?: SanitizeOptions) => {
				options?.onError?.({
					message: 'MCP schema exceeds maximum depth of 32',
					details: {
						toolName: 'deep_tool',
						path: '$.input',
						depth: 33,
						maxDepth: 32,
						limitType: 'depth',
						limit: 32,
					},
				});
				return {};
			},
		);
		const manager = new McpClientManager();

		await manager.getRegularTools(
			[{ name: 'safe_server', url: 'https://safe.example.com/mcp' }],
			logger as never,
		);

		expect(logger.warn).toHaveBeenCalledWith(
			'Skipped MCP tool with unsupported schema',
			expect.objectContaining({
				toolName: 'deep_tool',
				source: 'external MCP',
				path: '$.input',
				limitType: 'depth',
			}),
		);
	});
});
describe('SSRF policy (opt-in)', () => {
it('does not call validateUrl when no validator is supplied', async () => {
const manager = new McpClientManager();

View File

@ -4,7 +4,10 @@ import type { Result } from 'n8n-workflow';
import { UserError } from 'n8n-workflow';
import { nanoid } from 'nanoid';
import { isSafeMcpIdentifierName } from '../agent/mcp-tool-name-validation';
import { sanitizeMcpToolSchemas } from '../agent/sanitize-mcp-schemas';
import type { McpSchemaSanitizationError } from '../agent/sanitize-mcp-schemas';
import type { Logger } from '../logger';
import type { McpServerConfig } from '../types';
/**
@ -32,6 +35,37 @@ function buildMcpServers(configs: McpServerConfig[]): Record<string, McpServerEn
return servers;
}
/**
 * Build an onError callback for sanitizeMcpToolSchemas that logs each
 * dropped tool (with its limit details) via `logger.warn`, tagged with the
 * given source label. A missing logger makes the callback a no-op.
 */
function warnSkippedMcpSchema(logger: Logger | undefined, source: string) {
	return (error: McpSchemaSanitizationError) => {
		const { toolName, path, depth, maxDepth, limitType, limit } = error.details;
		logger?.warn('Skipped MCP tool with unsupported schema', {
			toolName,
			source,
			path,
			depth,
			maxDepth,
			limitType,
			limit,
			reason: error.message,
		});
	};
}
/**
 * Filter out MCP server configs whose names fail isSafeMcpIdentifierName,
 * logging one warning (tagged with `source`) per rejected config.
 * Returns only the safely-named configs, in original order.
 */
function getSafeMcpServers(
	configs: McpServerConfig[],
	logger: Logger | undefined,
	source: string,
): McpServerConfig[] {
	const safe: McpServerConfig[] = [];
	for (const config of configs) {
		if (isSafeMcpIdentifierName(config.name)) {
			safe.push(config);
			continue;
		}
		logger?.warn('Skipped MCP server with unsafe name', {
			serverName: config.name,
			source,
		});
	}
	return safe;
}
/**
* Owns the lifecycle of MCP client connections used by the orchestrator.
*
@ -63,32 +97,48 @@ export class McpClientManager {
constructor(private readonly ssrfValidator?: SsrfUrlValidator) {}
async getRegularTools(configs: McpServerConfig[]): Promise<ToolsInput> {
if (configs.length === 0) return {};
async getRegularTools(configs: McpServerConfig[], logger?: Logger): Promise<ToolsInput> {
const safeConfigs = getSafeMcpServers(configs, logger, 'external MCP');
if (safeConfigs.length === 0) return {};
const key = JSON.stringify(configs);
const key = JSON.stringify(safeConfigs);
return await this.getOrLoad(
this.regularToolsByKey,
this.inFlightRegularByKey,
key,
async () => {
await this.validateConfigs(configs);
return await this.connectAndListTools(`mcp-${nanoid(6)}`, configs, key);
await this.validateConfigs(safeConfigs);
return await this.connectAndListTools(
`mcp-${nanoid(6)}`,
safeConfigs,
key,
logger,
'external MCP',
);
},
);
}
async getBrowserTools(config: McpServerConfig | undefined): Promise<ToolsInput> {
async getBrowserTools(config: McpServerConfig | undefined, logger?: Logger): Promise<ToolsInput> {
if (!config) return {};
const key = JSON.stringify(config);
const [safeConfig] = getSafeMcpServers([config], logger, 'browser MCP');
if (!safeConfig) return {};
const key = JSON.stringify(safeConfig);
return await this.getOrLoad(
this.browserToolsByKey,
this.inFlightBrowserByKey,
key,
async () => {
await this.validateConfigs([config]);
return await this.connectAndListTools(`browser-mcp-${nanoid(6)}`, [config], key);
await this.validateConfigs([safeConfig]);
return await this.connectAndListTools(
`browser-mcp-${nanoid(6)}`,
[safeConfig],
key,
logger,
'browser MCP',
);
},
);
}
@ -167,9 +217,13 @@ export class McpClientManager {
id: string,
configs: McpServerConfig[],
clientKey: string,
logger: Logger | undefined,
source: string,
): Promise<ToolsInput> {
const client = new MCPClient({ id, servers: buildMcpServers(configs) });
this.clientsByKey.set(clientKey, client);
return sanitizeMcpToolSchemas(await client.listTools());
return sanitizeMcpToolSchemas(await client.listTools(), {
onError: warnSkippedMcpSchema(logger, source),
});
}
}

View File

@ -0,0 +1,140 @@
import { createAllTools, createOrchestratorDomainTools } from '..';
import type { InstanceAiContext } from '../../types';
// Stub every tool factory so these tests assert only on which tools end up in
// each assembled map (by id), not on the tools' real construction logic.
// Scoped factories (data-tables, nodes, workflows) encode the scope argument
// into the stub id so scope-specific wiring is observable.
jest.mock('../../parsers/structured-file-parser', () => ({
	isStructuredAttachment: jest.fn(() => false),
}));

jest.mock('../attachments/parse-file.tool', () => ({
	createParseFileTool: jest.fn(() => ({ id: 'parse-file' })),
}));

jest.mock('../credentials.tool', () => ({
	createCredentialsTool: jest.fn(() => ({ id: 'credentials' })),
}));

jest.mock('../data-tables.tool', () => ({
	createDataTablesTool: jest.fn((_context: unknown, scope?: string) => ({
		id: scope ? `data-tables-${scope}` : 'data-tables',
	})),
}));

jest.mock('../executions.tool', () => ({
	createExecutionsTool: jest.fn(() => ({ id: 'executions' })),
}));

jest.mock('../nodes.tool', () => ({
	createNodesTool: jest.fn((_context: unknown, scope?: string) => ({
		id: scope ? `nodes-${scope}` : 'nodes',
	})),
}));

jest.mock('../orchestration/browser-credential-setup.tool', () => ({
	createBrowserCredentialSetupTool: jest.fn(() => ({ id: 'browser-credential-setup' })),
}));

jest.mock('../orchestration/build-workflow-agent.tool', () => ({
	createBuildWorkflowAgentTool: jest.fn(() => ({ id: 'build-workflow-with-agent' })),
}));

jest.mock('../orchestration/complete-checkpoint.tool', () => ({
	createCompleteCheckpointTool: jest.fn(() => ({ id: 'complete-checkpoint' })),
}));

jest.mock('../orchestration/delegate.tool', () => ({
	createDelegateTool: jest.fn(() => ({ id: 'delegate' })),
}));

jest.mock('../orchestration/plan-with-agent.tool', () => ({
	createPlanWithAgentTool: jest.fn(() => ({ id: 'plan' })),
}));

jest.mock('../orchestration/plan.tool', () => ({
	createPlanTool: jest.fn(() => ({ id: 'create-tasks' })),
}));

jest.mock('../orchestration/report-verification-verdict.tool', () => ({
	createReportVerificationVerdictTool: jest.fn(() => ({ id: 'report-verification-verdict' })),
}));

jest.mock('../orchestration/verify-built-workflow.tool', () => ({
	createVerifyBuiltWorkflowTool: jest.fn(() => ({ id: 'verify-built-workflow' })),
}));

jest.mock('../research.tool', () => ({
	createResearchTool: jest.fn(() => ({ id: 'research' })),
}));

jest.mock('../shared/ask-user.tool', () => ({
	createAskUserTool: jest.fn(() => ({ id: 'ask-user' })),
}));

jest.mock('../task-control.tool', () => ({
	createTaskControlTool: jest.fn(() => ({ id: 'task-control' })),
}));

jest.mock('../workflows/apply-workflow-credentials.tool', () => ({
	createApplyWorkflowCredentialsTool: jest.fn(() => ({ id: 'apply-workflow-credentials' })),
}));

jest.mock('../workflows/build-workflow.tool', () => ({
	createBuildWorkflowTool: jest.fn(() => ({ id: 'build-workflow' })),
}));

jest.mock('../workflows.tool', () => ({
	createWorkflowsTool: jest.fn((_context: unknown, scope?: string) => ({
		id: scope ? `workflows-${scope}` : 'workflows',
	})),
}));

jest.mock('../workspace.tool', () => ({
	createWorkspaceTool: jest.fn(() => ({ id: 'workspace' })),
}));
/** Build a minimal InstanceAiContext double — only userId and logger.warn are used. */
function makeContext(): InstanceAiContext {
	const stub = {
		userId: 'user-a',
		logger: { warn: jest.fn() },
	};
	return stub as unknown as InstanceAiContext;
}
describe('domain tool construction', () => {
	beforeEach(() => {
		// Reset call counts on all factory stubs between tests.
		jest.clearAllMocks();
	});

	// The full tool map uses default (unscoped) factories throughout.
	it('creates the native full domain tool map', () => {
		const context = makeContext();
		const domainTools = createAllTools(context);
		expect(domainTools).toMatchObject({
			workflows: { id: 'workflows' },
			executions: { id: 'executions' },
			credentials: { id: 'credentials' },
			'data-tables': { id: 'data-tables' },
			workspace: { id: 'workspace' },
			research: { id: 'research' },
			nodes: { id: 'nodes' },
			'ask-user': { id: 'ask-user' },
			'build-workflow': { id: 'build-workflow' },
		});
	});

	// The orchestrator map passes an 'orchestrator' scope to the scoped
	// factories and omits build-workflow.
	it('creates the native orchestrator domain tool map', () => {
		const context = makeContext();
		const orchestratorTools = createOrchestratorDomainTools(context);
		expect(orchestratorTools).toMatchObject({
			workflows: { id: 'workflows-orchestrator' },
			executions: { id: 'executions' },
			credentials: { id: 'credentials' },
			'data-tables': { id: 'data-tables-orchestrator' },
			workspace: { id: 'workspace' },
			research: { id: 'research' },
			nodes: { id: 'nodes-orchestrator' },
			'ask-user': { id: 'ask-user' },
		});
	});
});

View File

@ -11,6 +11,8 @@ import type { InstanceAiContext } from '../types';
// ── Constants ──────────────────────────────────────────────────────────────
export const CREDENTIALS_TOOL_ID = 'credentials';
const DEFAULT_LIMIT = 50;
/** Generic auth types that should be excluded from search results — the AI should prefer dedicated types. */
@ -340,7 +342,7 @@ async function handleTest(context: InstanceAiContext, input: Extract<Input, { ac
export function createCredentialsTool(context: InstanceAiContext) {
return createTool({
id: 'credentials',
id: CREDENTIALS_TOOL_ID,
description:
'Manage credentials — list, get, delete, search available types, set up new credentials, and test connections.',
inputSchema,

View File

@ -12,6 +12,8 @@ import type { InstanceAiContext } from '../types';
// ── Shared schemas ─────────────────────────────────────────────────────────
export const DATA_TABLES_TOOL_ID = 'data-tables';
const columnTypeSchema = z.enum(['string', 'number', 'boolean', 'date']);
const filterSchema = z.object({
@ -597,7 +599,7 @@ export function createDataTablesTool(
const inputSchema = sanitizeInputSchema(z.discriminatedUnion('action', [...readOnlyActions]));
return createTool({
id: 'data-tables',
id: DATA_TABLES_TOOL_ID,
description: 'Manage data tables — list, get schema, and query rows.',
inputSchema,
execute: async (input: ReadOnlyInput) => {
@ -616,7 +618,7 @@ export function createDataTablesTool(
const inputSchema = sanitizeInputSchema(z.discriminatedUnion('action', [...allActions]));
return createTool({
id: 'data-tables',
id: DATA_TABLES_TOOL_ID,
description: 'Manage data tables — list, query, create, modify columns, and manage rows.',
inputSchema,
suspendSchema: confirmationSuspendSchema,

View File

@ -21,6 +21,11 @@ const CONFIRMATION_PAYLOAD = {
options: ['denyOnce', 'allowOnce', 'allowForSession'],
};
// Same payload as CONFIRMATION_PAYLOAD, but its options list contains a value
// ('alwaysAllow') that the gateway resource-decision schema does not support.
const CONFIRMATION_PAYLOAD_WITH_UNSUPPORTED_OPTION = {
	...CONFIRMATION_PAYLOAD,
	options: ['denyOnce', 'alwaysAllow', 'allowOnce'],
};
const PLAIN_CONFIRMATION_ERROR: McpToolCallResult = {
content: [
{
@ -31,6 +36,18 @@ const PLAIN_CONFIRMATION_ERROR: McpToolCallResult = {
isError: true,
};
// Plain-text confirmation-required error whose embedded JSON payload carries
// an unsupported option, used to verify the parser filters it out.
const PLAIN_CONFIRMATION_ERROR_WITH_UNSUPPORTED_OPTION: McpToolCallResult = {
	content: [
		{
			type: 'text',
			text: `${GATEWAY_CONFIRMATION_REQUIRED_PREFIX}${JSON.stringify(
				CONFIRMATION_PAYLOAD_WITH_UNSUPPORTED_OPTION,
			)}`,
		},
	],
	isError: true,
};
const JSON_ENVELOPE_CONFIRMATION_ERROR: McpToolCallResult = {
content: [
{
@ -108,6 +125,114 @@ describe('createToolsFromLocalMcpServer', () => {
expect(() => createToolsFromLocalMcpServer(server)).not.toThrow();
expect(createToolsFromLocalMcpServer(server)['bad_tool']).toBeDefined();
});
// Tool names failing validation are dropped from the map and logged.
it('skips tools with invalid names', () => {
	const logger = { warn: jest.fn() };
	const server = makeMockServer([
		{ ...SAMPLE_TOOL, name: 'bad tool' },
		{ ...SAMPLE_TOOL, name: 'read_file' },
	]);

	const tools = createToolsFromLocalMcpServer(server, logger as never);

	expect(tools['bad tool']).toBeUndefined();
	expect(tools.read_file).toBeDefined();
	expect(logger.warn).toHaveBeenCalledWith(
		'Skipped local gateway MCP tool with unsafe name',
		expect.objectContaining({
			source: 'local gateway MCP',
			toolName: 'bad tool',
		}),
	);
});

// Prototype-polluting keys such as 'constructor' must never become own
// properties of the tools map.
it('skips tools with unsafe object key names', () => {
	const logger = { warn: jest.fn() };
	const server = makeMockServer([
		{ ...SAMPLE_TOOL, name: 'constructor' },
		{ ...SAMPLE_TOOL, name: 'read_file' },
	]);

	const tools = createToolsFromLocalMcpServer(server, logger as never);

	expect(Object.prototype.hasOwnProperty.call(tools, 'constructor')).toBe(false);
	expect(tools.read_file).toBeDefined();
	expect(logger.warn).toHaveBeenCalledWith(
		'Skipped local gateway MCP tool with unsafe name',
		expect.objectContaining({
			source: 'local gateway MCP',
			toolName: 'constructor',
		}),
	);
});

// Two names that normalize to the same identifier: the first wins, the
// colliding later one is skipped and logged.
it('skips normalized name collisions between local gateway tools', () => {
	const logger = { warn: jest.fn() };
	const server = makeMockServer([
		{ ...SAMPLE_TOOL, name: 'custom_tool' },
		{ ...SAMPLE_TOOL, name: 'custom-tool' },
	]);

	const tools = createToolsFromLocalMcpServer(server, logger as never);

	expect(tools.custom_tool).toBeDefined();
	expect(tools['custom-tool']).toBeUndefined();
	expect(logger.warn).toHaveBeenCalledWith(
		'Skipped local gateway MCP tool with unsafe name',
		expect.objectContaining({
			source: 'local gateway MCP',
			toolName: 'custom-tool',
		}),
	);
});

// NOTE(review): the original fixture name here appears to be a non-ASCII
// string that this rendering shows as '' — confirm against the repository.
it('skips compatibility-normalized non-ASCII tool names', () => {
	const logger = { warn: jest.fn() };
	const server = makeMockServer([
		{ ...SAMPLE_TOOL, name: '' },
		{ ...SAMPLE_TOOL, name: 'read_file' },
	]);

	const tools = createToolsFromLocalMcpServer(server, logger as never);

	expect(tools['']).toBeUndefined();
	expect(tools.read_file).toBeDefined();
	expect(logger.warn).toHaveBeenCalledWith(
		'Skipped local gateway MCP tool with unsafe name',
		expect.objectContaining({
			source: 'local gateway MCP',
			toolName: '',
		}),
	);
});

// A raw schema over the property-count limit (251 > max) is rejected before
// any tool object is constructed; other tools are unaffected.
it('skips oversized raw schemas before tool construction', () => {
	const logger = { warn: jest.fn() };
	const properties = Object.fromEntries(
		Array.from({ length: 251 }, (_, index) => [`field_${index}`, { type: 'string' }]),
	);
	const server = makeMockServer([
		{
			...SAMPLE_TOOL,
			name: 'huge_tool',
			inputSchema: { type: 'object', properties },
		},
		{ ...SAMPLE_TOOL, name: 'read_file' },
	]);

	const tools = createToolsFromLocalMcpServer(server, logger as never);

	expect(tools.huge_tool).toBeUndefined();
	expect(tools.read_file).toBeDefined();
	expect(logger.warn).toHaveBeenCalledWith(
		'Skipped local gateway MCP tool with unsupported schema',
		expect.objectContaining({
			source: 'local gateway MCP',
			toolName: 'huge_tool',
			limitType: 'objectProperties',
		}),
	);
});
});
describe('execute — first-call path', () => {
@ -169,6 +294,22 @@ describe('createToolsFromLocalMcpServer', () => {
});
});
// An unsupported option in the daemon's wire payload is stripped before the
// suspend payload is built; the remaining supported options survive in order.
it('filters unsupported confirmation options after parsing the daemon payload', async () => {
	const server = makeMockServer();
	server.callTool.mockResolvedValue(PLAIN_CONFIRMATION_ERROR_WITH_UNSUPPORTED_OPTION);
	const suspend = jest.fn().mockResolvedValue(undefined);
	const execute = getExecute(server);

	await execute({ filePath: 'test.ts' }, makeCtx({ suspend }));

	expect(suspend).toHaveBeenCalledTimes(1);
	// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
	expect(suspend.mock.calls[0][0].resourceDecision).toMatchObject({
		...CONFIRMATION_PAYLOAD,
		options: ['denyOnce', 'allowOnce'],
	});
});
it('calls suspend() for a JSON-envelope GATEWAY_CONFIRMATION_REQUIRED error', async () => {
const server = makeMockServer();
server.callTool.mockResolvedValue(JSON_ENVELOPE_CONFIRMATION_ERROR);

View File

@ -12,7 +12,18 @@ import { z } from 'zod';
import { convertJsonSchemaToZod } from 'zod-from-json-schema-v3';
import type { JSONSchema } from 'zod-from-json-schema-v3';
import { sanitizeMcpToolSchemas } from '../../agent/sanitize-mcp-schemas';
import {
addSafeMcpTools,
createClaimedToolNames,
McpToolNameValidationError,
validateMcpToolName,
} from '../../agent/mcp-tool-name-validation';
import {
assertMcpJsonSchemaWithinLimits,
McpSchemaSanitizationError,
sanitizeMcpToolSchemas,
} from '../../agent/sanitize-mcp-schemas';
import type { Logger } from '../../logger';
import type { LocalMcpServer } from '../../types';
// ---------------------------------------------------------------------------
@ -27,15 +38,28 @@ const gatewayConfirmationSuspendSchema = z.object({
resourceDecision: gatewayConfirmationRequiredPayloadSchema,
});
const gatewayResourceDecisionSchema = z.enum(['denyOnce', 'allowOnce', 'allowForSession']);
const gatewayConfirmationRequiredWirePayloadSchema =
gatewayConfirmationRequiredPayloadSchema.extend({
options: z.array(z.string()),
});
const gatewayConfirmationResumeSchema = z.object({
approved: z.boolean(),
resourceDecision: z.string().optional(),
resourceDecision: gatewayResourceDecisionSchema.optional(),
});
// ---------------------------------------------------------------------------
// Helper
// ---------------------------------------------------------------------------
/**
 * Type guard narrowing a raw gateway option string to one of the resource
 * decisions this client supports (denyOnce / allowOnce / allowForSession).
 */
function isGatewayResourceDecision(
  option: string,
): option is z.infer<typeof gatewayResourceDecisionSchema> {
  const parsed = gatewayResourceDecisionSchema.safeParse(option);
  return parsed.success;
}
function tryParseGatewayConfirmationRequired(
result: McpToolCallResult,
): GatewayConfirmationRequiredPayload | null {
@ -66,8 +90,13 @@ function tryParseGatewayConfirmationRequired(
const json = JSON.parse(
candidate.slice(GATEWAY_CONFIRMATION_REQUIRED_PREFIX.length),
) as unknown;
const parsed = gatewayConfirmationRequiredPayloadSchema.safeParse(json);
return parsed.success ? parsed.data : null;
const parsed = gatewayConfirmationRequiredWirePayloadSchema.safeParse(json);
if (!parsed.success) return null;
const options = parsed.data.options.filter(isGatewayResourceDecision);
if (options.length === 0) return null;
return { ...parsed.data, options };
} catch {
return null;
}
@ -77,6 +106,33 @@ function tryParseGatewayConfirmationRequired(
// Factory
// ---------------------------------------------------------------------------
const LOCAL_GATEWAY_MCP_SOURCE = 'local gateway MCP';

/**
 * Builds a callback that logs a warning for an MCP tool skipped because its
 * JSON schema exceeded sanitization limits. No-op when no logger is provided.
 */
function warnSkippedLocalMcpSchema(logger: Logger | undefined) {
  return (error: McpSchemaSanitizationError) => {
    if (!logger) return;
    const { toolName, path, depth, maxDepth, limitType, limit } = error.details;
    logger.warn('Skipped local gateway MCP tool with unsupported schema', {
      toolName,
      source: LOCAL_GATEWAY_MCP_SOURCE,
      path,
      depth,
      maxDepth,
      limitType,
      limit,
      reason: error.message,
    });
  };
}
/**
 * Builds a callback that logs a warning for an MCP tool skipped because its
 * name failed validation. No-op when no logger is provided.
 */
function warnSkippedLocalMcpTool(logger: Logger | undefined) {
  return (error: McpToolNameValidationError) => {
    if (!logger) return;
    logger.warn('Skipped local gateway MCP tool with unsafe name', {
      toolName: error.toolName,
      source: error.source,
      reason: error.message,
    });
  };
}
/**
* Build Mastra tools dynamically from the MCP tools advertised by a connected
* local MCP server (e.g. the computer-use daemon).
@ -94,13 +150,40 @@ function tryParseGatewayConfirmationRequired(
* The `toModelOutput` callback converts MCP content blocks (text and image)
* into the AI SDK's multimodal format so the LLM receives images.
*/
export function createToolsFromLocalMcpServer(server: LocalMcpServer): ToolsInput {
export function createToolsFromLocalMcpServer(server: LocalMcpServer, logger?: Logger): ToolsInput {
const tools: ToolsInput = {};
const claimedToolNames = createClaimedToolNames([]);
const warnTool = warnSkippedLocalMcpTool(logger);
const warnSchema = warnSkippedLocalMcpSchema(logger);
for (const mcpTool of server.getAvailableTools()) {
const toolName = mcpTool.name;
const description = mcpTool.description ?? toolName;
try {
const normalizedName = validateMcpToolName(toolName, LOCAL_GATEWAY_MCP_SOURCE);
const claimedBy = claimedToolNames.get(normalizedName);
if (claimedBy) {
throw new McpToolNameValidationError(
`MCP tool "${toolName}" from ${LOCAL_GATEWAY_MCP_SOURCE} conflicts with "${claimedBy}"`,
toolName,
LOCAL_GATEWAY_MCP_SOURCE,
);
}
assertMcpJsonSchemaWithinLimits(mcpTool.inputSchema, { toolName });
claimedToolNames.set(normalizedName, toolName);
} catch (error) {
if (error instanceof McpToolNameValidationError) {
warnTool(error);
continue;
}
if (error instanceof McpSchemaSanitizationError) {
warnSchema(error);
continue;
}
throw error;
}
let inputSchema: z.ZodTypeAny;
try {
// Convert JSON Schema → Zod (v3) so the LLM sees the actual parameter shapes.
@ -208,5 +291,14 @@ export function createToolsFromLocalMcpServer(server: LocalMcpServer): ToolsInpu
tools[toolName] = tool;
}
return sanitizeMcpToolSchemas(tools);
const sanitizedTools = sanitizeMcpToolSchemas(tools, {
onError: warnSkippedLocalMcpSchema(logger),
});
const safeTools: ToolsInput = {};
addSafeMcpTools(safeTools, sanitizedTools, {
source: LOCAL_GATEWAY_MCP_SOURCE,
claimedToolNames: createClaimedToolNames([]),
warn: warnTool,
});
return safeTools;
}

View File

@ -1,10 +1,11 @@
import type { ToolsInput } from '@mastra/core/agent';
import { isStructuredAttachment } from '../parsers/structured-file-parser';
import type { InstanceAiContext, OrchestrationContext } from '../types';
import { createParseFileTool } from './attachments/parse-file.tool';
import { createCredentialsTool } from './credentials.tool';
import { createDataTablesTool } from './data-tables.tool';
import { createExecutionsTool } from './executions.tool';
import { createToolsFromLocalMcpServer } from './filesystem/create-tools-from-mcp-server';
import { createNodesTool } from './nodes.tool';
import { createBrowserCredentialSetupTool } from './orchestration/browser-credential-setup.tool';
import { createBuildWorkflowAgentTool } from './orchestration/build-workflow-agent.tool';
@ -26,7 +27,7 @@ import { createWorkspaceTool } from './workspace.tool';
* Creates all native n8n domain tools with the full action surface.
 * Used for delegate/builder tool resolution — sub-agents get unrestricted access.
*/
export function createAllTools(context: InstanceAiContext) {
export function createAllTools(context: InstanceAiContext): ToolsInput {
return {
workflows: createWorkflowsTool(context),
executions: createExecutionsTool(context),
@ -37,7 +38,6 @@ export function createAllTools(context: InstanceAiContext) {
nodes: createNodesTool(context),
'ask-user': createAskUserTool(),
'build-workflow': createBuildWorkflowTool(context),
...(context.localMcpServer ? createToolsFromLocalMcpServer(context.localMcpServer) : {}),
...(context.currentUserAttachments?.some(isStructuredAttachment)
? { 'parse-file': createParseFileTool(context) }
: {}),
@ -48,7 +48,7 @@ export function createAllTools(context: InstanceAiContext) {
 * Creates orchestrator-scoped domain tools — restricted action surfaces
* for tools where the orchestrator should not have write/builder access.
*/
export function createOrchestratorDomainTools(context: InstanceAiContext) {
export function createOrchestratorDomainTools(context: InstanceAiContext): ToolsInput {
return {
workflows: createWorkflowsTool(context, 'orchestrator'),
executions: createExecutionsTool(context),
@ -58,7 +58,6 @@ export function createOrchestratorDomainTools(context: InstanceAiContext) {
research: createResearchTool(context),
nodes: createNodesTool(context, 'orchestrator'),
'ask-user': createAskUserTool(),
...(context.localMcpServer ? createToolsFromLocalMcpServer(context.localMcpServer) : {}),
};
}

View File

@ -11,7 +11,13 @@ jest.mock('@mastra/core/tools', () => ({
import type { OrchestrationContext, PlannedTaskGraph, PlannedTaskService } from '../../../types';
const { __testClearPlannedTaskGraph, __testFormatMessagesForBriefing } =
const {
__testBuildPlannerBriefingContext,
__testClearPlannedTaskGraph,
__testFormatMessagesForBriefing,
__testGetRecentMessages,
__testGetPriorToolObservations,
} =
// eslint-disable-next-line @typescript-eslint/no-require-imports, @typescript-eslint/consistent-type-imports
require('../plan-with-agent.tool') as typeof import('../plan-with-agent.tool');
@ -130,4 +136,263 @@ describe('formatMessagesForBriefing', () => {
expect(briefing).toMatch(/<current-datetime>[^<]+<\/current-datetime>/);
expect(briefing).not.toContain('<user-timezone>');
});
// Answers/resources supplied via the optional briefing context must be rendered
// as dedicated markdown sections, one bullet per entry.
it('renders already-collected answers and discovered resources as dedicated sections', () => {
  const briefing = __testFormatMessagesForBriefing(
    [{ role: 'user', content: 'Build a Slack to-do agent' }],
    undefined,
    'America/New_York',
    {
      collectedAnswers: [
        'How often should the agent run?: Every morning',
        'Credential selected for slackApi: Slack account (slackApi)',
      ],
      discoveredResources: ['Credentials available: Slack account (slackApi)'],
    },
  );
  expect(briefing).toContain('## Already-collected answers');
  expect(briefing).toContain('- How often should the agent run?: Every morning');
  expect(briefing).toContain('- Credential selected for slackApi: Slack account (slackApi)');
  expect(briefing).toContain('## Already-discovered resources');
  expect(briefing).toContain('- Credentials available: Slack account (slackApi)');
});
});
// Tests for buildPlannerBriefingContext: converting raw prior tool observations
// into the "already collected / already discovered" briefing sections.
describe('buildPlannerBriefingContext', () => {
  it('extracts ask-user answers and credential selections from prior tool results', () => {
    const context = __testBuildPlannerBriefingContext([
      // A credentials list result: feeds both the resource summary and the
      // id → name lookup used to label the later setup selection.
      {
        toolName: 'credentials',
        args: { action: 'list' },
        result: {
          credentials: [
            { id: 'cred-slack', name: 'Slack account', type: 'slackApi' },
            { id: 'cred-anthropic', name: 'Anthropic account', type: 'anthropicApi' },
          ],
        },
      },
      {
        toolName: 'ask-user',
        args: {
          questions: [
            {
              id: 'schedule',
              question: 'How often should the agent run?',
              type: 'single',
            },
          ],
        },
        result: {
          answered: true,
          answers: [
            {
              questionId: 'schedule',
              selectedOptions: ['Every morning'],
            },
          ],
        },
      },
      {
        toolName: 'credentials',
        args: { action: 'setup' },
        result: {
          success: true,
          credentials: { slackApi: 'cred-slack' },
        },
      },
    ]);
    expect(context.collectedAnswers).toEqual([
      'How often should the agent run?: Every morning',
      'Credential selected for slackApi: Slack account (slackApi)',
    ]);
    expect(context.discoveredResources).toEqual([
      'Credentials available: Slack account (slackApi), Anthropic account (anthropicApi)',
    ]);
  });
  it('ignores unanswered and skipped ask-user answers', () => {
    const context = __testBuildPlannerBriefingContext([
      // answered: false — the whole observation must be discarded.
      {
        toolName: 'ask-user',
        args: {
          questions: [{ id: 'purpose', question: 'What should this do?', type: 'text' }],
        },
        result: {
          answered: false,
          answers: [
            {
              questionId: 'purpose',
              customText: 'This should not be used',
            },
          ],
        },
      },
      // Mixed result: the skipped answer is dropped, the other is kept.
      {
        toolName: 'ask-user',
        args: {
          questions: [
            { id: 'schedule', question: 'How often should it run?', type: 'single' },
            { id: 'model', question: 'Which model should it use?', type: 'single' },
          ],
        },
        result: {
          answered: true,
          answers: [
            {
              questionId: 'schedule',
              selectedOptions: ['Every morning'],
              skipped: true,
            },
            {
              questionId: 'model',
              selectedOptions: ['Anthropic'],
            },
          ],
        },
      },
    ]);
    expect(context.collectedAnswers).toEqual(['Which model should it use?: Anthropic']);
    expect(context.discoveredResources).toEqual([]);
  });
});
// Tests for getPriorToolObservations: reconstructing tool call/result pairs
// from event-bus events, including grouped-run lookup and failure fallback.
describe('getPriorToolObservations', () => {
  it('reads tool results across the current message group when available', () => {
    const askUserCall = {
      questions: [{ id: 'purpose', question: 'What should this do?', type: 'text' }],
    };
    const askUserResult = {
      answered: true,
      answers: [
        { questionId: 'purpose', question: 'What should this do?', customText: 'Email me' },
      ],
    };
    const getEventsForRun = jest.fn().mockReturnValue([]);
    const getEventsForRuns = jest.fn().mockReturnValue([
      {
        type: 'tool-call',
        runId: 'run-prior',
        agentId: 'orchestrator',
        payload: {
          toolCallId: 'tool-1',
          toolName: 'ask-user',
          args: askUserCall,
        },
      },
      {
        type: 'tool-result',
        runId: 'run-prior',
        agentId: 'orchestrator',
        payload: {
          toolCallId: 'tool-1',
          result: askUserResult,
        },
      },
    ]);
    const context = {
      threadId: 'thread-1',
      runId: 'run-current',
      messageGroupId: 'message-group-1',
      eventBus: {
        // run-start events determine which runs belong to the message group;
        // only run-prior shares message-group-1 with the current run.
        getEventsAfter: jest.fn().mockReturnValue([
          {
            id: 1,
            event: {
              type: 'run-start',
              runId: 'run-prior',
              agentId: 'orchestrator',
              payload: { messageId: 'message-1', messageGroupId: 'message-group-1' },
            },
          },
          {
            id: 2,
            event: {
              type: 'run-start',
              runId: 'run-other',
              agentId: 'orchestrator',
              payload: { messageId: 'message-2', messageGroupId: 'message-group-2' },
            },
          },
        ]),
        getEventsForRuns,
        getEventsForRun,
      },
    } as unknown as OrchestrationContext;
    const observations = __testGetPriorToolObservations(context);
    // Grouped lookup is preferred; the single-run path must not be used.
    expect(getEventsForRuns).toHaveBeenCalledWith('thread-1', ['run-prior', 'run-current']);
    expect(getEventsForRun).not.toHaveBeenCalled();
    expect(observations).toEqual([
      {
        toolName: 'ask-user',
        args: askUserCall,
        result: askUserResult,
      },
    ]);
  });
  it('pairs out-of-order tool results with their later tool calls', () => {
    const args = { action: 'list' };
    const result = { credentials: [{ id: 'cred-1', name: 'Slack', type: 'slackApi' }] };
    const context = {
      threadId: 'thread-1',
      runId: 'run-current',
      eventBus: {
        // The tool-result event precedes its tool-call on purpose.
        getEventsForRun: jest.fn().mockReturnValue([
          {
            type: 'tool-result',
            runId: 'run-current',
            agentId: 'orchestrator',
            payload: { toolCallId: 'tool-1', result },
          },
          {
            type: 'tool-call',
            runId: 'run-current',
            agentId: 'orchestrator',
            payload: { toolCallId: 'tool-1', toolName: 'credentials', args },
          },
        ]),
      },
    } as unknown as OrchestrationContext;
    expect(__testGetPriorToolObservations(context)).toEqual([
      { toolName: 'credentials', args, result },
    ]);
  });
  it('returns no observations when event lookup fails', () => {
    const context = {
      threadId: 'thread-1',
      runId: 'run-current',
      eventBus: {
        getEventsForRun: jest.fn(() => {
          throw new Error('storage unavailable');
        }),
      },
    } as unknown as OrchestrationContext;
    expect(__testGetPriorToolObservations(context)).toEqual([]);
  });
});
// Regression test: getRecentMessages must not duplicate the in-flight user
// message when memory recall already returned it as the last user entry.
describe('getRecentMessages', () => {
  it('does not append the current user message when memory already returned it', async () => {
    const context = {
      threadId: 't-1',
      currentUserMessage: 'Build a Slack to-do agent',
      memory: {
        recall: jest.fn().mockResolvedValue({
          messages: [{ role: 'user', content: 'Build a Slack to-do agent' }],
        }),
      },
    } as unknown as OrchestrationContext;
    const messages = await __testGetRecentMessages(context, 5);
    expect(messages).toEqual([{ role: 'user', content: 'Build a Slack to-do agent' }]);
  });
});

View File

@ -115,7 +115,10 @@ export function createBrowserCredentialSetupTool(context: OrchestrationContext)
if (gatewayBrowserTools.length > 0 && context.localMcpServer) {
// Gateway path: create Mastra tools from gateway, keep only browser category tools
const gatewayBrowserNames = new Set(gatewayBrowserTools.map((t) => t.name));
const allGatewayTools = createToolsFromLocalMcpServer(context.localMcpServer);
const allGatewayTools = createToolsFromLocalMcpServer(
context.localMcpServer,
context.logger,
);
for (const [name, tool] of Object.entries(allGatewayTools)) {
if (gatewayBrowserNames.has(name)) {
browserTools[name] = tool;

View File

@ -20,6 +20,7 @@ ${SUBAGENT_OUTPUT_CONTRACT}
- **Never ask about things you can discover** — call \`credentials(action="list")\`, \`data-tables(action="list")\`, \`templates(action="best-practices")\` instead.
- **Never ask about implementation details** — trigger types, node choices, schedule times, column names. Pick sensible defaults.
- **Never default resource identifiers** the user didn't mention (Slack channels, calendars, spreadsheets, folders, etc.) — leave them for the builder to resolve at build time.
- **Trust already-collected briefing context** — if the briefing includes an Already-collected answers or Already-discovered resources section, treat those entries as authoritative. Do not ask again for purpose, trigger, integrations, schedule, model, resource, or credential choices already listed there.
- **Do ask when the answer would significantly change the plan** — e.g. the user's goal is ambiguous ("build me a CRM" for sales? support? recruiting?), or a business rule must come from the user ("what should happen when payment fails?").
- **Do ask when a required service has more than one credential of the same type** (e.g. two \`openAiApi\` accounts, three Google Calendar accounts) — which one to use cannot be discovered, only chosen. Record the chosen credential name in \`assumptions\`.
- **List your assumptions** on your first \`add-plan-item\` call. The user reviews the plan before execution and can reject/correct.

View File

@ -15,6 +15,7 @@
import { Agent } from '@mastra/core/agent';
import type { ToolsInput } from '@mastra/core/agent';
import { createTool } from '@mastra/core/tools';
import type { InstanceAiEvent } from '@n8n/api-types';
import { DateTime } from 'luxon';
import { nanoid } from 'nanoid';
import { z } from 'zod';
@ -37,6 +38,9 @@ import { createLlmStepTraceHooks } from '../../runtime/resumable-stream-executor
import { consumeStreamWithHitl } from '../../stream/consume-with-hitl';
import { getTraceParentRun, withTraceParentContext } from '../../tracing/langsmith-tracing';
import type { OrchestrationContext } from '../../types';
import { CREDENTIALS_TOOL_ID } from '../credentials.tool';
import { DATA_TABLES_TOOL_ID } from '../data-tables.tool';
import { ASK_USER_TOOL_ID } from '../shared/ask-user.tool';
import { createTemplatesTool } from '../templates.tool';
/** Number of recent thread messages to include as planner context. */
@ -48,15 +52,43 @@ const PLANNER_DOMAIN_TOOL_NAMES = ['nodes', 'credentials', 'data-tables', 'workf
/** Research tools added when available. */
const PLANNER_RESEARCH_TOOL_NAMES = ['research'];
// Prior tool calls worth replaying to the planner: user answers plus
// credential / data-table discoveries collected earlier in the turn.
const RELEVANT_PRIOR_TOOL_NAMES = new Set([
  ASK_USER_TOOL_ID,
  CREDENTIALS_TOOL_ID,
  DATA_TABLES_TOOL_ID,
]);
// ---------------------------------------------------------------------------
// Message history retrieval
// ---------------------------------------------------------------------------
interface FormattedMessage {
role: string;
role: 'user' | 'assistant';
content: string;
}
/** Prior-context sections embedded into the planner briefing prompt. */
interface PlannerBriefingContext {
  // Human-readable `question: answer` lines already collected from the user.
  collectedAnswers: string[];
  // Human-readable summaries of resources already discovered (credentials, tables).
  discoveredResources: string[];
}

/** A prior tool call paired with its result, reconstructed from events. */
interface ToolObservation {
  toolName: string;
  args: Record<string, unknown>;
  result: unknown;
}

/** Minimal credential metadata needed to render briefing lines. */
interface CredentialBrief {
  id?: string;
  name: string;
  type: string;
}

/** Minimal data-table metadata needed to render briefing lines. */
interface DataTableBrief {
  id?: string;
  name: string;
}
/** Extract plain text from Mastra memory content (string, array of parts, or {format, parts}). */
function extractTextFromMemoryContent(content: unknown): string {
if (typeof content === 'string') return content;
@ -93,6 +125,38 @@ function extractTextParts(parts: unknown[]): string {
.join('\n');
}
/** True when `value` is a plain object — not null and not an array. */
function isRecord(value: unknown): value is Record<string, unknown> {
  if (value === null) return false;
  if (Array.isArray(value)) return false;
  return typeof value === 'object';
}
/** Returns `value` when it is a non-blank string; otherwise undefined. */
function readString(value: unknown): string | undefined {
  if (typeof value !== 'string') return undefined;
  return value.trim().length > 0 ? value : undefined;
}
/** Returns `value` as a record when it is a plain object; otherwise undefined. */
function readRecord(value: unknown): Record<string, unknown> | undefined {
  if (!isRecord(value)) return undefined;
  return value;
}
/** Returns `value` unchanged when it is an array; otherwise an empty array. */
function readArray(value: unknown): unknown[] {
  if (Array.isArray(value)) return value;
  return [];
}
/** Returns the string elements of `value` when it is an array; otherwise []. */
function readStringArray(value: unknown): string[] {
  const items = Array.isArray(value) ? value : [];
  return items.filter((item): item is string => typeof item === 'string');
}
/** Appends `value` to `target` unless it is empty/undefined or already in `seen`. */
function addUnique(target: string[], seen: Set<string>, value: string | undefined): void {
  if (!value) return;
  if (seen.has(value)) return;
  seen.add(value);
  target.push(value);
}
/**
 * Joins up to `limit` values with commas; longer lists are truncated and end
 * with ", and N more".
 */
function summarizeList(values: string[], limit = 10): string {
  const shown = values.slice(0, limit).join(', ');
  const hidden = values.length - limit;
  if (hidden > 0) return `${shown}, and ${hidden} more`;
  return shown;
}
async function getRecentMessages(
context: OrchestrationContext,
count: number,
@ -120,17 +184,343 @@ async function getRecentMessages(
}
// Always append the current in-flight user message (not yet saved to memory)
if (context.currentUserMessage) {
if (shouldAppendCurrentUserMessage(messages, context.currentUserMessage)) {
messages.push({ role: 'user', content: context.currentUserMessage });
}
return messages;
}
/**
 * Whether the in-flight user message still needs appending: true when it is
 * non-blank and differs (after trimming) from the most recent user message in
 * `messages`. Doubles as a type guard narrowing `currentUserMessage` to string.
 */
function shouldAppendCurrentUserMessage(
  messages: FormattedMessage[],
  currentUserMessage?: string,
): currentUserMessage is string {
  const current = currentUserMessage?.trim();
  if (!current) return false;
  // Walk backwards to the most recent user message and compare against it.
  for (let index = messages.length - 1; index >= 0; index -= 1) {
    const candidate = messages[index];
    if (!candidate || candidate.role !== 'user') continue;
    return candidate.content.trim() !== current;
  }
  return true;
}
/**
* Reconstructs prior planner-relevant tool calls from the event stream.
*
* Tool-call and tool-result events are correlated by `toolCallId` so the
* planner can receive structured context that is not preserved in text-only
* memory recall, such as ask-user answers and credential selections.
*/
function getPriorToolObservations(context: OrchestrationContext): ToolObservation[] {
  // A tool-call entry plus whether a matching tool-result has been seen yet.
  type MutableToolObservation = Omit<ToolObservation, 'result'> & {
    result: unknown;
    hasResult: boolean;
  };
  // Calls keyed by toolCallId, and results that arrived before their call.
  const toolCalls = new Map<string, MutableToolObservation>();
  const pendingResults = new Map<string, unknown>();
  for (const event of getPriorToolEvents(context)) {
    if (event.type === 'tool-call') {
      const { toolCallId, toolName, args } = event.payload;
      // Only planner-relevant tools (ask-user, credentials, data-tables) are kept.
      if (!RELEVANT_PRIOR_TOOL_NAMES.has(toolName)) continue;
      // Adopt a result that was emitted before this call (out-of-order events).
      const pendingResult = pendingResults.get(toolCallId);
      toolCalls.set(toolCallId, {
        toolName,
        args,
        result: pendingResult,
        hasResult: pendingResults.has(toolCallId),
      });
      continue;
    }
    if (event.type === 'tool-result') {
      const { toolCallId, result } = event.payload;
      const existing = toolCalls.get(toolCallId);
      if (existing) {
        existing.result = result;
        existing.hasResult = true;
      } else {
        // Result arrived before (or without) its call; park it for later pairing.
        pendingResults.set(toolCallId, result);
      }
    }
  }
  // Only calls that were paired with a result become observations.
  return [...toolCalls.values()]
    .filter((observation) => observation.hasResult)
    .map(({ toolName, args, result }) => ({ toolName, args, result }));
}
/**
* Returns the events that may contain prior tool context for this planner run.
*
* When the run belongs to a message group, all runs in that group are searched
* so follow-up runs can see choices collected earlier in the same assistant
* turn. If grouped lookup is unavailable, this falls back to the current run.
*/
function getPriorToolEvents(context: OrchestrationContext): InstanceAiEvent[] {
  // Prefer the grouped lookup when this run belongs to a message group.
  const groupRunIds = context.messageGroupId ? getMessageGroupRunIds(context) : [];
  if (groupRunIds.length > 0) {
    try {
      return context.eventBus.getEventsForRuns(context.threadId, groupRunIds);
    } catch {
      // Grouped lookup failed — fall through to the single-run lookup.
    }
  }
  try {
    return context.eventBus.getEventsForRun(context.threadId, context.runId);
  } catch {
    return [];
  }
}
/**
* Finds run IDs that belong to the current message group from run-start events.
*
* The event bus can fetch events for many run IDs, but the orchestration
* context only carries the current run ID and message group ID. This bridges
* those two concepts while keeping the current run as a defensive fallback.
*/
function getMessageGroupRunIds(context: OrchestrationContext): string[] {
  const { messageGroupId } = context;
  if (!messageGroupId) return [];
  const runIds = new Set<string>();
  try {
    for (const { event } of context.eventBus.getEventsAfter(context.threadId, 0)) {
      if (event.type !== 'run-start') continue;
      if (event.payload.messageGroupId !== messageGroupId) continue;
      runIds.add(event.runId);
    }
  } catch {
    // Event lookup failed; at minimum the current run belongs to the group.
    return [context.runId];
  }
  runIds.add(context.runId);
  return [...runIds];
}
/**
* Converts raw prior tool observations into planner briefing sections.
*
* The resulting strings are intentionally short and human-readable because
* they are embedded directly into the planner prompt under dedicated headings.
*/
function buildPlannerBriefingContext(observations: ToolObservation[]): PlannerBriefingContext {
  const collectedAnswers: string[] = [];
  const discoveredResources: string[] = [];
  const seenAnswers = new Set<string>();
  const seenResources = new Set<string>();
  const credentialsById = buildCredentialLookup(observations);

  // Deduplicating sinks for the two briefing sections.
  const noteAnswer = (line: string | undefined) => addUnique(collectedAnswers, seenAnswers, line);
  const noteResource = (line: string | undefined) =>
    addUnique(discoveredResources, seenResources, line);

  for (const observation of observations) {
    const { toolName } = observation;
    if (toolName === ASK_USER_TOOL_ID) {
      for (const line of extractAskUserAnswerLines(observation)) noteAnswer(line);
      continue;
    }
    if (toolName === CREDENTIALS_TOOL_ID) {
      const action = readString(observation.args.action);
      if (action === 'list') noteResource(summarizeCredentials(observation.result));
      if (action === 'setup') {
        for (const line of extractCredentialSelectionLines(observation, credentialsById)) {
          noteAnswer(line);
        }
      }
      continue;
    }
    if (toolName === DATA_TABLES_TOOL_ID && readString(observation.args.action) === 'list') {
      noteResource(summarizeDataTables(observation.result));
    }
  }
  return { collectedAnswers, discoveredResources };
}
/**
* Builds an ID lookup from prior credential list results.
*
* Credential setup results contain selected IDs, so this lets the briefing
* render stable user-facing names and credential types when a prior list result
* is available.
*/
function buildCredentialLookup(observations: ToolObservation[]): Map<string, CredentialBrief> {
  const byId = new Map<string, CredentialBrief>();
  const credentialObservations = observations.filter(
    ({ toolName }) => toolName === CREDENTIALS_TOOL_ID,
  );
  for (const observation of credentialObservations) {
    for (const credential of extractCredentials(observation.result)) {
      if (!credential.id) continue;
      byId.set(credential.id, credential);
    }
  }
  return byId;
}
/**
* Extracts answered ask-user responses as `question: answer` briefing lines.
*
* Skipped or unanswered prompts are ignored, and question text is recovered
* from tool args when the tool result only includes a question ID.
*/
function extractAskUserAnswerLines(observation: ToolObservation): string[] {
  const result = readRecord(observation.result);
  if (!result) return [];
  if (result.answered === false) return [];
  const questionsById = extractQuestionTextById(observation.args);
  const lines: string[] = [];
  for (const rawAnswer of readArray(result.answers)) {
    const answer = readRecord(rawAnswer);
    if (!answer) continue;
    if (answer.skipped === true) continue;
    // Prefer question text echoed in the result; fall back to the tool args.
    const questionId = readString(answer.questionId);
    const fallbackQuestion = questionId ? questionsById.get(questionId) : undefined;
    const question = readString(answer.question) ?? fallbackQuestion;
    if (!question) continue;
    const values = readStringArray(answer.selectedOptions);
    const customText = readString(answer.customText);
    if (customText) values.push(customText);
    if (values.length === 0) continue;
    lines.push(`${question}: ${values.join(', ')}`);
  }
  return lines;
}
/**
* Maps ask-user question IDs to display text from the original tool args.
*/
function extractQuestionTextById(args: Record<string, unknown>): Map<string, string> {
  const byId = new Map<string, string>();
  for (const rawQuestion of readArray(args.questions)) {
    const question = readRecord(rawQuestion);
    if (!question) continue;
    const id = readString(question.id);
    const text = readString(question.question);
    if (!id || !text) continue;
    byId.set(id, text);
  }
  return byId;
}
/**
* Renders credential setup selections as briefing lines.
*
* The setup tool returns a `{ credentialType: credentialId }` map. The optional
* credential lookup turns those IDs back into names so the planner can avoid
* asking the user to choose the same credential again.
*/
function extractCredentialSelectionLines(
  observation: ToolObservation,
  credentialsById: Map<string, CredentialBrief>,
): string[] {
  const credentials = readRecord(readRecord(observation.result)?.credentials);
  if (!credentials) return [];
  const lines: string[] = [];
  for (const [credentialType, rawCredentialId] of Object.entries(credentials)) {
    const credentialId = readString(rawCredentialId);
    if (!credentialId) continue;
    // Render a friendly name when a prior list result told us what the ID means.
    const known = credentialsById.get(credentialId);
    const label = known ? `${known.name} (${known.type})` : `credential ID ${credentialId}`;
    lines.push(`Credential selected for ${credentialType}: ${label}`);
  }
  return lines;
}
/**
* Summarizes a credentials list result for the briefing.
*/
function summarizeCredentials(result: unknown): string | undefined {
  const credentials = extractCredentials(result);
  if (credentials.length === 0) return undefined;
  const labels = credentials.map(({ name, type }) => `${name} (${type})`);
  return `Credentials available: ${summarizeList(labels)}`;
}
/**
* Reads the minimal credential metadata needed by the planner briefing.
*/
function extractCredentials(result: unknown): CredentialBrief[] {
  const record = readRecord(result);
  const briefs: CredentialBrief[] = [];
  for (const entry of readArray(record?.credentials)) {
    const brief = readCredentialBrief(entry);
    if (brief) briefs.push(brief);
  }
  return briefs;
}
/** Reads one credential entry; requires a non-blank name and type. */
function readCredentialBrief(value: unknown): CredentialBrief | undefined {
  const record = readRecord(value);
  const name = readString(record?.name);
  const type = readString(record?.type);
  if (!name || !type) return undefined;
  const brief: CredentialBrief = { name, type };
  const id = readString(record?.id);
  if (id) brief.id = id;
  return brief;
}
/**
* Summarizes a data-tables list result for the briefing.
*/
function summarizeDataTables(result: unknown): string | undefined {
  const tables = extractDataTables(result);
  if (tables.length === 0) return undefined;
  const names = tables.map((table) => table.name);
  return `Data tables available: ${summarizeList(names)}`;
}
/**
* Reads the minimal data-table metadata needed by the planner briefing.
*/
function extractDataTables(result: unknown): DataTableBrief[] {
  const record = readRecord(result);
  const briefs: DataTableBrief[] = [];
  for (const entry of readArray(record?.tables)) {
    const brief = readDataTableBrief(entry);
    if (brief) briefs.push(brief);
  }
  return briefs;
}
/** Reads one data-table entry; requires a non-blank name. */
function readDataTableBrief(value: unknown): DataTableBrief | undefined {
  const record = readRecord(value);
  const name = readString(record?.name);
  if (!name) return undefined;
  const brief: DataTableBrief = { name };
  const id = readString(record?.id);
  if (id) brief.id = id;
  return brief;
}
/**
* Formats conversation, time, and already-collected context into the planner goal.
*/
function formatMessagesForBriefing(
messages: FormattedMessage[],
guidance?: string,
timeZone?: string,
briefingContext?: PlannerBriefingContext,
): string {
const parts: string[] = [];
@ -151,6 +541,20 @@ function formatMessagesForBriefing(
}
}
if (briefingContext?.collectedAnswers.length) {
parts.push('## Already-collected answers');
for (const answer of briefingContext.collectedAnswers) {
parts.push(`- ${answer}`);
}
}
if (briefingContext?.discoveredResources.length) {
parts.push('## Already-discovered resources');
for (const resource of briefingContext.discoveredResources) {
parts.push(`- ${resource}`);
}
}
if (guidance) {
parts.push(`\n## Orchestrator guidance\n${guidance}`);
}
@ -161,6 +565,9 @@ function formatMessagesForBriefing(
}
// Internal helpers re-exported for unit tests only — not part of the public API.
export const __testFormatMessagesForBriefing = formatMessagesForBriefing;
export const __testGetRecentMessages = getRecentMessages;
export const __testGetPriorToolObservations = getPriorToolObservations;
export const __testBuildPlannerBriefingContext = buildPlannerBriefingContext;
// ---------------------------------------------------------------------------
// Helper: clear draft checklist from taskStorage
@ -268,7 +675,13 @@ export function createPlanWithAgentTool(context: OrchestrationContext) {
// ── Retrieve conversation history ─────────────────────────────
const messages = await getRecentMessages(context, MESSAGE_HISTORY_COUNT);
const briefing = formatMessagesForBriefing(messages, input.guidance, context.timeZone);
const briefingContext = buildPlannerBriefingContext(getPriorToolObservations(context));
const briefing = formatMessagesForBriefing(
messages,
input.guidance,
context.timeZone,
briefingContext,
);
// ── IDs & events ──────────────────────────────────────────────
const subAgentId = `agent-planner-${nanoid(6)}`;

View File

@ -2,6 +2,8 @@ import { createTool } from '@mastra/core/tools';
import { nanoid } from 'nanoid';
import { z } from 'zod';
/** Stable tool ID, exported so other modules can recognize ask-user tool calls. */
export const ASK_USER_TOOL_ID = 'ask-user';
const questionSchema = z.object({
id: z.string().describe('Unique question identifier'),
question: z.string().describe('The question text to display to the user'),
@ -36,7 +38,7 @@ export const askUserResumeSchema = z.object({
export function createAskUserTool() {
return createTool({
id: 'ask-user',
id: ASK_USER_TOOL_ID,
description:
'Ask the user one or more structured questions. Each question can be ' +
'single-select (pick one), multi-select (pick many), or free-text. ' +

View File

@ -828,6 +828,7 @@ describe('OutputParserStructured', () => {
await expect(execution).rejects.toThrow(
'Auto-fixing parser prompt has to contain {error} placeholder',
);
await expect(execution).rejects.toThrow(NodeOperationError);
});
it('should throw error when prompt template is empty', async () => {

View File

@ -100,6 +100,7 @@ export const GENERIC_OAUTH2_CREDENTIALS_WITH_EDITABLE_SCOPE = [
'microsoftOAuth2Api',
'highLevelOAuth2Api',
'mcpOAuth2Api',
'stravaOAuth2Api',
'wordpressOAuth2Api',
];

View File

@ -830,6 +830,71 @@ describe('OAuth2CredentialController', () => {
['csrfSecret'],
);
});
// Dynamic-credential flow: after token exchange and JWE decryption, the
// controller must persist via saveDynamicCredential (with the caller token,
// resolver ID, and auth metadata from state) and must NOT use encryptAndSaveData.
it('routes the decrypted response to saveDynamicCredential when origin is dynamic-credential', async () => {
  const dynamicState = {
    token: 'token',
    cid: '1',
    userId: '123',
    origin: 'dynamic-credential' as const,
    credentialResolverId: 'resolver-1',
    authorizationHeader: 'Bearer caller-token',
    authMetadata: { tenant: 'acme' },
    createdAt: timestamp,
    data: 'encrypted-data',
  };
  // Provider returns a JWE-wrapped access token from the code exchange.
  const mockGetToken = jest
    .fn()
    .mockResolvedValue({ data: { access_token: 'jwe-blob', refresh_token: 'r' } });
  const { ClientOAuth2 } = await import('@n8n/client-oauth2');
  jest
    .mocked(ClientOAuth2)
    .mockImplementation(() => ({ code: { getToken: mockGetToken } }) as any);
  const mockResolvedCredential = mock<CredentialsEntity>({ id: '1' });
  oauthService.resolveCredential.mockResolvedValueOnce([
    mockResolvedCredential,
    { csrfSecret: 'csrf-secret' },
    {
      clientId: 'client_id',
      clientSecret: 'client_secret',
      authUrl: 'https://example.domain/oauth2/auth',
      accessTokenUrl: 'https://example.domain/oauth2/token',
      scope: 'openid',
      grantType: 'authorizationCode',
      authentication: 'header',
      jweEnabled: true,
    } as any,
    dynamicState,
  ]);
  oauthService.getBaseUrl.mockReturnValue('http://localhost:5678/rest/oauth2-credential');
  externalHooks.run.mockResolvedValue(undefined);
  // The JWE proxy decrypts the wrapped token data before persistence.
  oauthJweServiceProxy.decryptOAuth2TokenData.mockResolvedValue({
    access_token: 'decrypted',
    refresh_token: 'r',
  });
  const req = mock<OAuthRequest.OAuth2Credential.Callback>({
    query: { code: 'auth_code', state: validState },
    originalUrl: '/oauth2-credential/callback?code=auth_code&state=state',
  });
  await controller.handleCallback(req, res);
  expect(oauthJweServiceProxy.decryptOAuth2TokenData).toHaveBeenCalledWith(
    expect.objectContaining({ access_token: 'jwe-blob' }),
  );
  expect(oauthService.saveDynamicCredential).toHaveBeenCalledWith(
    mockResolvedCredential,
    expect.objectContaining({
      oauthTokenData: expect.objectContaining({ access_token: 'decrypted' }),
    }),
    'caller-token',
    'resolver-1',
    { tenant: 'acme' },
  );
  expect(oauthService.encryptAndSaveData).not.toHaveBeenCalled();
});
});
it('should handle errors and render error page', async () => {

View File

@ -1,117 +0,0 @@
import { mockInstance } from '@n8n/backend-test-utils';
import { GlobalConfig } from '@n8n/config';
import type { SqliteConfig } from '@n8n/config';
import type { IExecutionResponse } from '@n8n/db';
import { ExecutionEntity, ExecutionRepository } from '@n8n/db';
import { Container } from '@n8n/di';
import type { SelectQueryBuilder } from '@n8n/typeorm';
import { Not, LessThanOrEqual } from '@n8n/typeorm';
import { mock } from 'jest-mock-extended';
import { BinaryDataService } from 'n8n-core';
import type { IRunExecutionData, IWorkflowBase } from 'n8n-workflow';
import { nanoid } from 'nanoid';
import { mockEntityManager } from '@test/mocking';
describe('ExecutionRepository', () => {
const entityManager = mockEntityManager(ExecutionEntity);
const globalConfig = mockInstance(GlobalConfig, {
logging: { outputs: ['console'], scopes: [] },
});
const binaryDataService = mockInstance(BinaryDataService);
const executionRepository = Container.get(ExecutionRepository);
const mockDate = new Date('2023-12-28 12:34:56.789Z');
beforeAll(() => {
jest.clearAllMocks();
jest.useFakeTimers().setSystemTime(mockDate);
});
afterAll(() => jest.useRealTimers());
describe('getWaitingExecutions()', () => {
test.each(['sqlite', 'postgresdb'] as const)(
'on %s, should be called with expected args',
async (dbType) => {
globalConfig.database.type = dbType;
entityManager.find.mockResolvedValueOnce([]);
await executionRepository.getWaitingExecutions();
expect(entityManager.find).toHaveBeenCalledWith(ExecutionEntity, {
order: { waitTill: 'ASC' },
select: ['id', 'waitTill'],
where: {
status: Not('crashed'),
waitTill: LessThanOrEqual(
dbType === 'sqlite'
? '2023-12-28 12:36:06.789'
: new Date('2023-12-28T12:36:06.789Z'),
),
},
});
},
);
});
describe('deleteExecutionsByFilter', () => {
test('should delete binary data', async () => {
const workflowId = nanoid();
jest.spyOn(executionRepository, 'createQueryBuilder').mockReturnValue(
mock<SelectQueryBuilder<ExecutionEntity>>({
select: jest.fn().mockReturnThis(),
andWhere: jest.fn().mockReturnThis(),
getMany: jest.fn().mockResolvedValue([{ id: '1', workflowId }]),
}),
);
await executionRepository.deleteExecutionsByFilter({
filters: { id: '1' },
accessibleWorkflowIds: ['1'],
deleteConditions: { ids: ['1'] },
});
expect(binaryDataService.deleteMany).toHaveBeenCalledWith([
{ type: 'execution', executionId: '1', workflowId },
]);
});
});
describe('updateExistingExecution', () => {
test.each(['sqlite', 'postgresdb'] as const)(
'should update execution and data in transaction on %s',
async (dbType) => {
globalConfig.database.type = dbType;
globalConfig.database.sqlite = mock<SqliteConfig>({ poolSize: 1 });
const executionId = '1';
const execution = mock<IExecutionResponse>({
id: executionId,
data: mock<IRunExecutionData>(),
workflowData: mock<IWorkflowBase>(),
status: 'success',
});
const txCallback = jest.fn();
entityManager.transaction.mockImplementation(async (cb) => {
// @ts-expect-error Mock
await cb(entityManager);
txCallback();
});
// Mock update to return affected count
entityManager.update.mockResolvedValue({ affected: 1, raw: [], generatedMaps: [] });
await executionRepository.updateExistingExecution(executionId, execution);
expect(entityManager.transaction).toHaveBeenCalled();
expect(entityManager.update).toHaveBeenCalledWith(
ExecutionEntity,
{ id: executionId },
expect.objectContaining({ status: 'success' }),
);
expect(txCallback).toHaveBeenCalledTimes(1);
},
);
});
});

View File

@ -2,12 +2,12 @@ import type {
BuiltAgent,
BuiltTool,
CredentialProvider,
GenerateResult,
StreamChunk,
ToolDescriptor,
} from '@n8n/agents';
import {
AGENT_SCHEDULE_TRIGGER_TYPE,
AGENT_WORKFLOW_TRIGGER_TYPE,
isAgentCredentialIntegration,
isAgentScheduleIntegration,
type AgentSkill,
@ -336,6 +336,28 @@ export class AgentsService {
});
}
/**
 * Variant of {@link findByUser} restricted to agents that carry a
 * `publishedVersion`. The MessageAnAgent node's listSearch uses this so its
 * dropdown only offers agents that can actually run — `executeForWorkflow`
 * rejects unpublished agents at runtime, so surfacing them would only lead
 * to a confusing "Agent is not published" error after the user picks one.
 */
async findPublishedByUser(userId: string): Promise<Agent[]> {
	const relations = await this.projectRelationRepository.findAllByUser(userId);
	const accessibleProjectIds = relations.map(({ projectId }) => projectId);
	if (accessibleProjectIds.length === 0) return [];

	const candidates = await this.agentRepository.find({
		where: { projectId: In(accessibleProjectIds) },
		relations: { publishedVersion: true },
		order: { updatedAt: 'DESC' },
	});

	// Published check happens in memory: the relation is eagerly loaded above,
	// so this filters out rows whose publishedVersion is null/undefined.
	return candidates.filter((candidate) => Boolean(candidate.publishedVersion));
}
async publishAgent(agentId: string, projectId: string, userId: string): Promise<Agent> {
const agent = await this.agentRepository.findByIdAndProjectId(agentId, projectId);
if (!agent) {
@ -1039,8 +1061,15 @@ export class AgentsService {
/**
* Execute an SDK agent within a workflow execution context.
* Compiles a fresh isolated agent per call for credential isolation
* (does not use or affect the shared runtime cache).
*
* Streams the run rather than calling `.generate()` so the same
* `ExecutionRecorder` used by chat/Slack/schedule paths can collect a full
* `MessageRecord` (timeline, tool calls, usage). Without this, sessions
* triggered from a workflow node never appear in the agent's session list
* because nothing creates the agent execution thread row.
*
* Compiles a fresh isolated agent per call for credential isolation (does
* not use or affect the shared runtime cache).
*/
async executeForWorkflow(
agentId: string,
@ -1071,77 +1100,104 @@ export class AgentsService {
throw new OperationalError(`Failed to compile agent: ${compiled.error ?? 'unknown error'}`);
}
const result = await compiled.agent.generate(message, {
persistence: {
resourceId: executionId,
threadId,
},
const agentInstance = compiled.agent;
const recorder = new ExecutionRecorder();
// `structuredOutput` and `toolCalls` aren't surfaced by the recorder —
// pull them off the `finish` chunk and the discrete `tool-result` chunks
// directly so the workflow node receives the same shape as before.
let structuredOutput: unknown | null = null;
const toolCalls: ExecuteAgentData['toolCalls'] = [];
const toolInputs = new Map<string, { toolName: string; input: unknown }>();
const resultStream = await agentInstance.stream(message, {
persistence: { resourceId: executionId, threadId },
});
// Check for errors
if (result.error) {
const errorMessage =
result.error instanceof Error
? result.error.message
: typeof result.error === 'string'
? result.error
: JSON.stringify(result.error);
throw new OperationalError(`Agent execution failed: ${errorMessage}`);
const reader = resultStream.stream.getReader();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
recorder.record(value);
if (value.type === 'tool-call') {
toolInputs.set(value.toolCallId, { toolName: value.toolName, input: value.input });
} else if (value.type === 'tool-result') {
const pending = toolInputs.get(value.toolCallId);
toolCalls.push({
toolName: value.toolName,
input: pending?.input ?? null,
result: value.output,
});
toolInputs.delete(value.toolCallId);
} else if (value.type === 'finish' && value.structuredOutput !== undefined) {
structuredOutput = value.structuredOutput;
}
}
} finally {
reader.releaseLock();
}
if (result.finishReason === 'error') {
throw new OperationalError('Agent execution finished with an error.');
}
const messageRecord = recorder.getMessageRecord();
if (result.pendingSuspend && result.pendingSuspend.length > 0) {
const toolNames = result.pendingSuspend
.map((s: { toolName: string }) => s.toolName)
.join(', ');
// Persist the thread + execution row + metadata so the session is
// listed under the agent (mirrors chat/slack/schedule recording).
// Fire-and-forget with .catch so a recording failure doesn't fail the
// workflow node — the response is already in hand.
void this.agentExecutionService
.recordMessage({
threadId,
agentId,
agentName: agentInstance.name,
projectId,
userMessage: message,
record: messageRecord,
source: AGENT_WORKFLOW_TRIGGER_TYPE,
})
.catch((error) => {
this.logger.warn('Failed to record agent execution from workflow', {
agentId,
threadId,
error: error instanceof Error ? error.message : String(error),
});
});
if (recorder.suspended) {
throw new OperationalError(
`Agent execution suspended waiting for tool approval: ${toolNames}. ` +
'Agent execution suspended waiting for tool approval. ' +
'Suspend/resume is not supported in workflow execution context.',
);
}
if (messageRecord.error) {
throw new OperationalError(`Agent execution failed: ${messageRecord.error}`);
}
if (messageRecord.finishReason === 'error') {
throw new OperationalError('Agent execution finished with an error.');
}
return {
response: this.extractTextResponse(result),
structuredOutput: result.structuredOutput ?? null,
usage: result.usage
response: messageRecord.assistantResponse,
structuredOutput: structuredOutput ?? null,
usage: messageRecord.usage
? {
promptTokens: result.usage.promptTokens,
completionTokens: result.usage.completionTokens,
totalTokens: result.usage.totalTokens,
promptTokens: messageRecord.usage.promptTokens,
completionTokens: messageRecord.usage.completionTokens,
totalTokens: messageRecord.usage.totalTokens,
}
: null,
toolCalls: (result.toolCalls ?? []).map(
(tc: { tool: string; input: unknown; output: unknown }) => ({
toolName: tc.tool,
input: tc.input,
result: tc.output,
}),
),
finishReason: result.finishReason ?? 'stop',
toolCalls,
finishReason: messageRecord.finishReason,
session: {
agentId,
projectId,
sessionId: threadId,
},
};
}
/**
* Extract the text response from the last assistant message in a GenerateResult.
*/
private extractTextResponse(result: GenerateResult): string {
for (let i = result.messages.length - 1; i >= 0; i--) {
const msg = result.messages[i];
if (msg.type !== 'custom' && msg.role === 'assistant' && Array.isArray(msg.content)) {
const textParts = (msg.content as Array<{ type: string; text?: string }>)
.filter((c): c is { type: 'text'; text: string } => c.type === 'text')
.map((c) => c.text);
if (textParts.length > 0) {
return textParts.join('');
}
}
}
return '';
}
/**
* Get the JSON config for an agent.
*/

View File

@ -855,8 +855,14 @@ describe('InstanceAiController', () => {
});
});
it('should return token and command', async () => {
it('should return token, command, and token expiry', async () => {
const nowSpy = jest
.spyOn(Date, 'now')
.mockReturnValue(new Date('2026-01-01T00:00:00.000Z').getTime());
instanceAiService.generatePairingToken.mockReturnValue('pairing-token');
instanceAiService.getGatewayApiKeyExpiresAt.mockReturnValue(
new Date('2026-01-01T00:05:00.000Z'),
);
urlService.getInstanceBaseUrl.mockReturnValue('https://myinstance.n8n.cloud');
const result = await controller.createGatewayLink(req);
@ -864,8 +870,15 @@ describe('InstanceAiController', () => {
expect(result).toEqual({
token: 'pairing-token',
command: 'npx @n8n/computer-use https://myinstance.n8n.cloud pairing-token',
expiresAt: '2026-01-01T00:05:00.000Z',
ttlSeconds: 300,
});
expect(instanceAiService.generatePairingToken).toHaveBeenCalledWith(USER_ID);
expect(instanceAiService.getGatewayApiKeyExpiresAt).toHaveBeenCalledWith(
USER_ID,
'pairing-token',
);
nowSpy.mockRestore();
});
});

View File

@ -30,11 +30,15 @@ describe('LocalGatewayRegistry — per-user gateway isolation', () => {
expect(token1).toBe(token2);
});
it('returns the active session key if one already exists', () => {
it('returns a pairing token instead of exposing an active session key', () => {
const pairingToken = registry.generatePairingToken('user-a');
const sessionKey = registry.consumePairingToken('user-a', pairingToken);
const sessionKey = registry.consumePairingToken('user-a', pairingToken)!;
const nextPairingToken = registry.generatePairingToken('user-a');
expect(registry.generatePairingToken('user-a')).toBe(sessionKey);
expect(nextPairingToken).toMatch(/^gw_/);
expect(nextPairingToken).not.toBe(sessionKey);
expect(registry.getUserIdForApiKey(sessionKey)).toBe('user-a');
expect(registry.getUserIdForApiKey(nextPairingToken)).toBe('user-a');
});
it('generates independent tokens for different users', () => {
@ -77,6 +81,16 @@ describe('LocalGatewayRegistry — per-user gateway isolation', () => {
});
describe('getPairingToken', () => {
it('returns the expiry time for an active pairing token', () => {
const nowSpy = jest.spyOn(Date, 'now').mockReturnValue(1_000);
const token = registry.generatePairingToken('user-a');
expect(registry.getApiKeyExpiresAt('user-a', token)?.toISOString()).toBe(
new Date(301_000).toISOString(),
);
nowSpy.mockRestore();
});
it('returns null and cleans up the reverse lookup for an expired token', () => {
const token = registry.generatePairingToken('user-a');
@ -91,6 +105,19 @@ describe('LocalGatewayRegistry — per-user gateway isolation', () => {
expect(registry.getPairingToken('user-a')).toBeNull();
expect(registry.getUserIdForApiKey(token)).toBeUndefined();
});
it('rejects an expired pairing token via getUserIdForApiKey without prior cleanup', () => {
const token = registry.generatePairingToken('user-a');
const userGateways = (
registry as unknown as {
userGateways: Map<string, { pairingToken: { token: string; createdAt: number } | null }>;
}
).userGateways;
userGateways.get('user-a')!.pairingToken!.createdAt = Date.now() - 10 * 60 * 1000;
expect(registry.getUserIdForApiKey(token)).toBeUndefined();
});
});
describe('getGatewayStatus', () => {

View File

@ -55,15 +55,23 @@ export class LocalGatewayRegistry {
/** Resolve an API key (pairing token or session key) back to the owning userId. */
getUserIdForApiKey(key: string): string | undefined {
return this.apiKeyToUserId.get(key);
const userId = this.apiKeyToUserId.get(key);
if (!userId) return undefined;
const state = this.userGateways.get(userId);
if (state?.pairingToken?.token === key) {
if (Date.now() - state.pairingToken.createdAt > PAIRING_TOKEN_TTL_MS) {
this.apiKeyToUserId.delete(state.pairingToken.token);
state.pairingToken = null;
return undefined;
}
}
return userId;
}
/** Generate a one-time pairing token for UI-initiated connections. */
generatePairingToken(userId: string): string {
const state = this.getOrCreate(userId);
// If there's an active session key, return it so the daemon can reconnect
// without losing its authenticated session (e.g. after a page reload).
if (state.activeSessionKey) return state.activeSessionKey;
// Reuse existing valid token to prevent race conditions between concurrent callers.
const existing = this.getPairingToken(userId);
@ -87,6 +95,15 @@ export class LocalGatewayRegistry {
return state.pairingToken.token;
}
/**
 * Expiry timestamp for `key` when it is the user's active pairing token.
 * Session keys never expire, so any key that isn't the current pairing
 * token yields null; an already-expired pairing token also yields null.
 */
getApiKeyExpiresAt(userId: string, key: string): Date | null {
	const pairing = this.userGateways.get(userId)?.pairingToken;
	if (pairing?.token !== key) return null;
	// getPairingToken applies the TTL check, so a null here means the token
	// is no longer valid even though the state entry still matched above.
	if (this.getPairingToken(userId) === null) return null;
	return new Date(pairing.createdAt + PAIRING_TOKEN_TTL_MS);
}
/**
* Consume the pairing token and issue a long-lived session key.
* Returns the session key, or null if the token is invalid or expired.

View File

@ -631,9 +631,13 @@ export class InstanceAiController {
async createGatewayLink(req: AuthenticatedRequest) {
await this.assertGatewayEnabled(req.user.id);
const token = this.instanceAiService.generatePairingToken(req.user.id);
const expiresAt = this.instanceAiService.getGatewayApiKeyExpiresAt(req.user.id, token);
const ttlSeconds = expiresAt
? Math.max(0, Math.ceil((expiresAt.getTime() - Date.now()) / 1000))
: null;
const baseUrl = this.urlService.getInstanceBaseUrl();
const command = `npx @n8n/computer-use ${baseUrl} ${token}`;
return { token, command };
return { token, command, expiresAt: expiresAt?.toISOString() ?? null, ttlSeconds };
}
@Get('/gateway/events', { usesTemplates: true, skipAuth: true })

View File

@ -1276,6 +1276,10 @@ export class InstanceAiService {
return this.gatewayRegistry.generatePairingToken(userId);
}
/** Expiry for a gateway API key; delegates to the registry's getApiKeyExpiresAt. */
getGatewayApiKeyExpiresAt(userId: string, key: string): Date | null {
	return this.gatewayRegistry.getApiKeyExpiresAt(userId, key);
}
/** The user's current pairing token from the registry, or null when there is none. */
getPairingToken(userId: string): string | null {
	return this.gatewayRegistry.getPairingToken(userId);
}

View File

@ -214,6 +214,7 @@ export class FrontendService {
nodeEnv: process.env.NODE_ENV,
versionCli: N8N_VERSION,
concurrency: this.globalConfig.executions.concurrency.productionLimit,
evaluationConcurrencyLimit: this.globalConfig.executions.concurrency.evaluationLimit,
authCookie: {
secure: this.globalConfig.auth.cookie.secure,
},

View File

@ -295,7 +295,11 @@ export async function executeAgent(
async function listAgents(userId: string): Promise<Array<{ id: string; name: string }>> {
const { AgentsService } = await import('@/modules/agents/agents.service');
const agentsService = Container.get(AgentsService);
const agents = await agentsService.findByUser(userId);
// Only published agents are runnable from a workflow — see the publish
// guard in `executeForWorkflow`. Filtering here keeps unpublished agents
// out of the MessageAnAgent dropdown so users don't pick one that would
// fail at execution time.
const agents = await agentsService.findPublishedByUser(userId);
return agents.map((agent) => ({ id: agent.id, name: agent.name }));
}

View File

@ -166,7 +166,7 @@ export class BaseExecuteContext extends NodeExecutionContext {
throw new OperationalError('Agent execution is not available in this context');
}
const threadId = `${executionId}-${itemIndex}`;
const threadId = agentInfo.sessionId?.trim() || `${executionId}-${itemIndex}`;
return await this.additionalData.executeAgent(
agentInfo.agentId,

View File

@ -72,7 +72,7 @@
// Canvas
--canvas--color--background: var(--color--neutral-125);
--canvas--dot--color: var(--color--neutral-500);
--canvas--read-only-line--color: var(--color--neutral-100);
--canvas--read-only-line--color: var(--color--neutral-200);
--canvas--color--selected: var(--color--neutral-150);
--canvas--color--selected-transparent: hsla(220, 47%, 30%, 0.1);
--canvas--label--color: var(--color--neutral-600);

View File

@ -1665,6 +1665,7 @@
"logs.overview.body.toggleRow": "Toggle row",
"logs.details.header.actions.input": "Input",
"logs.details.header.actions.output": "Output",
"logs.details.header.actions.viewAgentSession": "View session",
"logs.details.body.itemCount": "{count} item | {count} items",
"logs.details.body.multipleInputs": "Multiple inputs. View them by {button}",
"logs.details.body.multipleInputs.openingTheNode": "opening the node",
@ -4505,7 +4506,7 @@
"settings.encryptionKeys.description": "Data encryption keys protect credentials, variables, and other sensitive data at rest. Rotating generates a new active key. Past keys are archived and retained for audit.",
"settings.encryptionKeys.description.docsLink": "Learn more in documentation",
"settings.encryptionKeys.column.key": "Key",
"settings.encryptionKeys.column.type": "Type",
"settings.encryptionKeys.column.status": "Status",
"settings.encryptionKeys.column.activated": "Activated",
"settings.encryptionKeys.column.archived": "Archived",
"settings.encryptionKeys.status.active": "Active",
@ -5977,5 +5978,8 @@
"instanceAi.welcomeModal.gateway.instructions.mac": "Open Terminal (Cmd + Space, type \"Terminal\") and paste the command below.",
"instanceAi.welcomeModal.gateway.instructions.windows": "Open Terminal (Windows key, type \"Terminal\") and paste the command below.",
"instanceAi.welcomeModal.gateway.instructions.linux": "Open your terminal and paste the command below.",
"instanceAi.welcomeModal.gateway.tokenExpiresIn": "This token expires in {minutes} min.",
"instanceAi.welcomeModal.gateway.tokenExpired": "This token has expired. Copy the command again.",
"instanceAi.welcomeModal.gateway.leadingSpaceHint": "If your shell supports it, start the command with a space to keep it out of history.",
"instanceAi.welcomeModal.gateway.browserAutomationHint": "Want browser automation? Install the <a href=\"{url}\" target=\"_blank\" rel=\"noopener\">n8n Browser Use Chrome extension</a> so the agent can control your browser."
}

View File

@ -118,6 +118,7 @@ export const defaultSettings: FrontendSettings = {
nodeJsVersion: '',
nodeEnv: '',
concurrency: -1,
evaluationConcurrencyLimit: -1,
versionNotifications: {
enabled: true,
endpoint: '',

View File

@ -0,0 +1,140 @@
import { describe, it, expect } from 'vitest';
import { computed, defineComponent, h } from 'vue';
import { mount } from '@vue/test-utils';
import { createRouter, createWebHistory } from 'vue-router';
import type { ITaskData } from 'n8n-workflow';
import { MESSAGE_AN_AGENT_NODE_TYPE } from '@/app/constants/nodeTypes';
import { AGENT_SESSION_DETAIL_VIEW } from '@/features/agents/constants';
import type { LogEntry } from '@/features/execution/logs/logs.types';
import { useMessageAgentSessionLink } from '../composables/useMessageAgentSessionLink';
/**
 * Build a minimal LogEntry fixture for a MessageAnAgent node run. Only the
 * fields the composable reads are populated; the rest is cast through so we
 * avoid pulling in a real Workflow factory just for this test.
 */
function makeLogEntry(overrides: Partial<LogEntry> = {}): LogEntry {
	const defaults = {
		id: 'log-1',
		runIndex: 0,
		children: [],
		consumedTokens: { completionTokens: 0, isEstimate: false, promptTokens: 0, totalTokens: 0 },
		executionId: 'exec-1',
		execution: { resultData: { runData: {} } },
		isSubExecution: false,
		node: {
			id: 'node-1',
			name: 'Message an Agent',
			type: MESSAGE_AN_AGENT_NODE_TYPE,
			typeVersion: 1,
			parameters: {},
			position: [0, 0],
		},
		runData: undefined,
		workflow: {},
	};
	// `defaults` is freshly built per call, so mutating it here is safe.
	return Object.assign(defaults, overrides) as unknown as LogEntry;
}
/**
 * Mount a throwaway component that invokes the composable against a router
 * that either does or doesn't register the agent-session route, and expose a
 * getter so tests can read the computed link's current value.
 */
function runWithRouter(
	logEntry: { value: LogEntry | undefined },
	registerSessionRoute: boolean,
): { link: () => ReturnType<typeof useMessageAgentSessionLink>['link']['value'] } {
	const routes = registerSessionRoute
		? [
				{
					name: AGENT_SESSION_DETAIL_VIEW,
					path: '/projects/:projectId/agents/:agentId/sessions/:threadId',
					component: () => h('div'),
				},
			]
		: [{ path: '/', component: () => h('div') }];
	const router = createRouter({ history: createWebHistory(), routes });

	let capturedLink: ReturnType<typeof useMessageAgentSessionLink>['link'] | null = null;
	const Probe = defineComponent({
		setup() {
			capturedLink = useMessageAgentSessionLink(computed(() => logEntry.value)).link;
			return () => h('div');
		},
	});
	mount(Probe, { global: { plugins: [router] } });

	return { link: () => capturedLink!.value };
}
// Successful MessageAnAgent run whose single output item carries the
// `session` block (agentId/projectId/sessionId) the composable extracts.
const sessionRunData = {
	executionStatus: 'success',
	startTime: 0,
	executionTime: 1,
	source: [],
	data: {
		main: [
			[
				{
					json: {
						response: 'hi',
						session: {
							agentId: 'agent-1',
							projectId: 'project-1',
							sessionId: 'thread-1',
						},
					},
				},
			],
		],
	},
} as unknown as ITaskData;
describe('useMessageAgentSessionLink', () => {
	it('returns a resolved href + open() for a messageAnAgent run with a session block', () => {
		const entry = { value: makeLogEntry({ runData: sessionRunData }) };
		const { link } = runWithRouter(entry, true);

		const resolved = link();
		expect(resolved).not.toBeNull();
		expect(resolved!.href).toBe('/projects/project-1/agents/agent-1/sessions/thread-1');
		expect(typeof resolved!.open).toBe('function');
	});

	it('returns null when the node-type is not messageAnAgent', () => {
		const otherNode = {
			id: 'n',
			name: 'Other',
			type: 'n8n-nodes-base.set',
			typeVersion: 1,
			parameters: {},
			position: [0, 0],
		} as LogEntry['node'];
		const entry = { value: makeLogEntry({ node: otherNode, runData: sessionRunData }) };

		const { link } = runWithRouter(entry, true);
		expect(link()).toBeNull();
	});

	it('returns null when run output has no session block', () => {
		const withoutSession = {
			...sessionRunData,
			data: { main: [[{ json: { response: 'hi' } }]] },
		} as unknown as ITaskData;

		const { link } = runWithRouter({ value: makeLogEntry({ runData: withoutSession }) }, true);
		expect(link()).toBeNull();
	});

	it('returns null when the session route is not registered (graceful fallback)', () => {
		const { link } = runWithRouter({ value: makeLogEntry({ runData: sessionRunData }) }, false);
		expect(link()).toBeNull();
	});
});

View File

@ -0,0 +1,96 @@
import { computed, type ComputedRef } from 'vue';
import { useRouter } from 'vue-router';
import { MESSAGE_AN_AGENT_NODE_TYPE } from '@/app/constants/nodeTypes';
import { AGENT_SESSION_DETAIL_VIEW } from '@/features/agents/constants';
import type { LogEntry } from '@/features/execution/logs/logs.types';
/**
 * Session identifiers the MessageAnAgent node emits in its output JSON. Kept
 * structural so we don't have to import the runtime type from `n8n-workflow`
 * just to read three string fields.
 */
type MessageAgentSession = {
	agentId: string;
	projectId: string;
	sessionId: string;
};

/** True when `value` is an object carrying three non-empty string identifiers. */
function isMessageAgentSession(value: unknown): value is MessageAgentSession {
	if (typeof value !== 'object' || value === null) return false;
	const candidate = value as Record<string, unknown>;
	const fields = [candidate.agentId, candidate.projectId, candidate.sessionId];
	return fields.every((field) => typeof field === 'string' && field.length > 0);
}
/**
 * Pull the first valid `session` payload out of a MessageAnAgent log entry's
 * run output. Returns null when the entry is missing, belongs to a different
 * node type, or none of its output items carry a well-formed session block.
 */
function extractSession(logEntry: LogEntry | undefined): MessageAgentSession | null {
	if (logEntry?.node.type !== MESSAGE_AN_AGENT_NODE_TYPE) return null;

	const branches = logEntry.runData?.data?.main;
	if (!Array.isArray(branches)) return null;

	for (const branch of branches) {
		if (!Array.isArray(branch)) continue;
		for (const item of branch) {
			const candidate = (item?.json as Record<string, unknown> | undefined)?.session;
			if (isMessageAgentSession(candidate)) return candidate;
		}
	}
	return null;
}
/**
 * Expose a resolved agent-session URL + opener for a MessageAnAgent log
 * entry whose run output contained a `session` block. Yields `null` for any
 * other node type or for runs missing the expected payload, so the caller
 * can `v-if` straight off `link`.
 *
 * The opener targets a new tab (matching n8n's other deep links from the
 * execution log) so the workflow execution view stays in place and the link
 * still works when the logs panel is popped out into its own window.
 */
export function useMessageAgentSessionLink(logEntry: ComputedRef<LogEntry | undefined>): {
	link: ComputedRef<{ href: string; open: () => void } | null>;
} {
	const router = useRouter();

	const link = computed(() => {
		const session = extractSession(logEntry.value);
		if (!session) return null;

		// `router.resolve` throws for unknown named routes. Guard against the
		// agents module not being mounted (or a test router that doesn't know
		// the route) so the button can't crash the log panel render.
		let resolved: { href: string };
		try {
			resolved = router.resolve({
				name: AGENT_SESSION_DETAIL_VIEW,
				params: {
					projectId: session.projectId,
					agentId: session.agentId,
					threadId: session.sessionId,
				},
			});
		} catch {
			return null;
		}

		const href = resolved.href;
		return {
			href,
			open: () => {
				window.open(href, '_blank', 'noopener');
			},
		};
	});

	return { link };
}

View File

@ -1,4 +1,5 @@
import { createPinia, setActivePinia } from 'pinia';
import { reactive } from 'vue';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { LOCAL_STORAGE_PARALLEL_EVAL_BY_WORKFLOW } from '@/app/constants/localStorage';
@ -11,10 +12,30 @@ vi.mock('@/app/stores/posthog.store', () => ({
})),
}));
// Singleton-shaped mock so the store keeps a stable `settingsStore` reference
// across the test lifetime. Mutating `.settings.evaluationConcurrencyLimit`
// then propagates through the `maxConcurrency` `computed`'s reactive read of
// `settingsStore.settings?.evaluationConcurrencyLimit`, which mirrors how the
// real store updates after `/rest/login` resolves and settings are populated.
const mockSettingsState = reactive({
settings: { evaluationConcurrencyLimit: -1 },
});
vi.mock('@/app/stores/settings.store', () => ({
useSettingsStore: vi.fn(() => mockSettingsState),
}));
const mockEvaluationConcurrencyLimit = (limit: number) => {
mockSettingsState.settings.evaluationConcurrencyLimit = limit;
};
describe('parallelEval.store', () => {
beforeEach(() => {
setActivePinia(createPinia());
localStorage.removeItem(LOCAL_STORAGE_PARALLEL_EVAL_BY_WORKFLOW);
// Reset settings mock between tests; `clearAllMocks` only resets call
// history, not `mockReturnValue` implementations, so cross-test bleed
// would otherwise cap the slider in unrelated cases.
mockEvaluationConcurrencyLimit(-1);
});
afterEach(() => {
@ -132,6 +153,60 @@ describe('parallelEval.store', () => {
});
});
// Verifies the slider-ceiling computed against every BE-convention input:
// unset (-1), 0 ("unlimited"), lower-than-max, and higher-than-max limits,
// plus the read-side clamping of previously stored preferences.
describe('maxConcurrency (admin cap via N8N_CONCURRENCY_EVALUATION_LIMIT)', () => {
	it('defaults to 10 when the limit is unset (-1, "unlimited")', () => {
		mockEvaluationConcurrencyLimit(-1);
		const store = useParallelEvalStore();
		expect(store.maxConcurrency).toBe(10);
	});

	it('lowers the slider ceiling to the configured limit', () => {
		mockEvaluationConcurrencyLimit(5);
		const store = useParallelEvalStore();
		expect(store.maxConcurrency).toBe(5);
	});

	it('caps at 10 even when the configured limit is higher (BE clamp parity)', () => {
		mockEvaluationConcurrencyLimit(50);
		const store = useParallelEvalStore();
		expect(store.maxConcurrency).toBe(10);
	});

	it('treats 0 the same as unlimited (BE convention)', () => {
		mockEvaluationConcurrencyLimit(0);
		const store = useParallelEvalStore();
		expect(store.maxConcurrency).toBe(10);
	});

	it('setConcurrencyValue clamps writes to the configured limit', () => {
		mockEvaluationConcurrencyLimit(4);
		const store = useParallelEvalStore();
		store.setConcurrencyValue('wf-a', 9);
		expect(store.concurrencyValue('wf-a')).toBe(4);
	});

	it('concurrencyValue surfaces the clamped value when admin lowers the cap below a stored preference', () => {
		// Pre-existing user preference of 8 (stored when limit was open).
		mockEvaluationConcurrencyLimit(-1);
		const store = useParallelEvalStore();
		store.setConcurrencyValue('wf-a', 8);
		expect(store.concurrencyValue('wf-a')).toBe(8);
		// Admin lowers cap to 3 — UI should reflect 3, not 8.
		mockEvaluationConcurrencyLimit(3);
		expect(store.concurrencyValue('wf-a')).toBe(3);
	});

	it('effectiveConcurrency reflects the cap so the value sent to BE matches the slider', () => {
		mockEvaluationConcurrencyLimit(-1);
		const store = useParallelEvalStore();
		store.setConcurrencyValue('wf-a', 8);
		// Lowering the cap after the write must also cap the effective value.
		mockEvaluationConcurrencyLimit(2);
		expect(store.effectiveConcurrency('wf-a')).toBe(2);
	});
});
describe('effectiveConcurrency', () => {
it('returns the slider value when parallel is enabled', () => {
const store = useParallelEvalStore();

View File

@ -5,6 +5,7 @@ import { computed } from 'vue';
import { LOCAL_STORAGE_PARALLEL_EVAL_BY_WORKFLOW } from '@/app/constants/localStorage';
import { usePostHog } from '@/app/stores/posthog.store';
import { useSettingsStore } from '@/app/stores/settings.store';
// Sentinel used for workflows that haven't been saved yet (no id assigned).
// Mirrors the per-workflow localStorage pattern used elsewhere in the editor.
@ -12,6 +13,11 @@ const NEW_WORKFLOW_SENTINEL = 'new';
// Fallback concurrency used when a non-finite value is written via
// `setConcurrencyValue` (see the clamp there).
export const DEFAULT_PARALLEL_CONCURRENCY = 3;

// Hard upper bound for the slider, mirrored on the BE in
// `test-runner.service.ee.ts`'s `runTest` clamp. Admins can lower this via
// `N8N_CONCURRENCY_EVALUATION_LIMIT`; they cannot raise it.
const SLIDER_HARD_MAX = 10;
interface PerWorkflowState {
parallelEnabled: boolean;
concurrencyValue: number;
@ -36,6 +42,7 @@ const buildDefaultState = (): PerWorkflowState => ({
*/
export const useParallelEvalStore = defineStore('parallelEval', () => {
const postHog = usePostHog();
const settingsStore = useSettingsStore();
const storage = useLocalStorage<StoredState>(
LOCAL_STORAGE_PARALLEL_EVAL_BY_WORKFLOW,
{},
@ -48,6 +55,18 @@ export const useParallelEvalStore = defineStore('parallelEval', () => {
() => postHog.isFeatureEnabled(EVAL_PARALLEL_EXECUTION_FLAG) === true,
);
// Effective slider ceiling: the BE's `N8N_CONCURRENCY_EVALUATION_LIMIT`
// (`evaluationConcurrencyLimit`) further constrains the 110 UX range.
// `<= 0` means unlimited per BE convention. Mirrors the runtime clamp in
// `test-runner.service.ee.ts:runTest` so the slider can only offer values
// the BE will actually accept.
const maxConcurrency = computed(() => {
	const configuredLimit = settingsStore.settings?.evaluationConcurrencyLimit;
	// A positive finite number lowers the ceiling; anything else (missing,
	// 0, negative) means "unlimited" and leaves the hard max in place.
	const hasPositiveLimit = typeof configuredLimit === 'number' && configuredLimit > 0;
	if (!hasPositiveLimit) return SLIDER_HARD_MAX;
	return Math.min(SLIDER_HARD_MAX, Math.floor(configuredLimit));
});
const resolveKey = (workflowId: string | undefined): string =>
workflowId && workflowId.length > 0 ? workflowId : NEW_WORKFLOW_SENTINEL;
@ -65,8 +84,12 @@ export const useParallelEvalStore = defineStore('parallelEval', () => {
const isParallel = (workflowId: string | undefined): boolean =>
ensureEntry(resolveKey(workflowId)).parallelEnabled;
// Read-side clamp (no mutation): if the admin lowers
// `N8N_CONCURRENCY_EVALUATION_LIMIT` below a previously-stored value, the
// UI surfaces the capped number while leaving the user's preference intact
// in localStorage so it returns naturally if the cap is later raised.
const concurrencyValue = (workflowId: string | undefined): number =>
ensureEntry(resolveKey(workflowId)).concurrencyValue;
Math.min(ensureEntry(resolveKey(workflowId)).concurrencyValue, maxConcurrency.value);
const setParallel = (workflowId: string | undefined, value: boolean): void => {
ensureEntry(resolveKey(workflowId)).parallelEnabled = value;
@ -80,7 +103,7 @@ export const useParallelEvalStore = defineStore('parallelEval', () => {
// the checked-but-cleared UX feels natural rather than dropping to
// sequential behind the user's back.
const safe = Number.isFinite(value) ? value : DEFAULT_PARALLEL_CONCURRENCY;
const clamped = Math.max(1, Math.min(10, Math.floor(safe)));
const clamped = Math.max(1, Math.min(maxConcurrency.value, Math.floor(safe)));
ensureEntry(resolveKey(workflowId)).concurrencyValue = clamped;
};
@ -92,11 +115,14 @@ export const useParallelEvalStore = defineStore('parallelEval', () => {
*/
const effectiveConcurrency = (workflowId: string | undefined): number => {
const state = ensureEntry(resolveKey(workflowId));
return state.parallelEnabled ? state.concurrencyValue : 1;
// Use the read-side clamped value so what we send matches what the
// slider shows when the admin cap is below the stored preference.
return state.parallelEnabled ? Math.min(state.concurrencyValue, maxConcurrency.value) : 1;
};
return {
isFeatureEnabled,
maxConcurrency,
isParallel,
concurrencyValue,
setParallel,

View File

@ -125,7 +125,7 @@ watch(runningTestRun, (run) => {
<ConcurrencySlider
v-model="concurrencyModel"
:min="1"
:max="10"
:max="parallelEvalStore.maxConcurrency"
:step="1"
show-stops
:class="$style.concurrencySlider"

View File

@ -36,6 +36,8 @@ const mockFetchPreferences = vi.fn();
const mockUpdatePreferences = vi.fn();
const mockFetchModelCredentials = vi.fn().mockResolvedValue([]);
const mockFetchServiceCredentials = vi.fn().mockResolvedValue([]);
const mockCreateGatewayLink = vi.fn();
const mockDisconnectGatewaySession = vi.fn();
vi.mock('../instanceAi.settings.api', () => ({
fetchSettings: (...args: unknown[]) => mockFetchSettings(...args),
@ -48,8 +50,8 @@ vi.mock('../instanceAi.settings.api', () => ({
const mockGetGatewayStatus = vi.fn();
vi.mock('../instanceAi.api', () => ({
createGatewayLink: vi.fn(),
disconnectGatewaySession: vi.fn(),
createGatewayLink: (...args: unknown[]) => mockCreateGatewayLink(...args),
disconnectGatewaySession: (...args: unknown[]) => mockDisconnectGatewaySession(...args),
getGatewayStatus: (...args: unknown[]) => mockGetGatewayStatus(...args),
}));
@ -396,4 +398,86 @@ describe('useInstanceAiSettingsStore', () => {
expect(store.connections[0].type).toBe('computer-use');
});
});
describe('setup command', () => {
	beforeEach(() => {
		// Enable the module with the local gateway allowed so
		// fetchSetupCommand does not early-return.
		setModuleSettings(settingsStore, {
			enabled: true,
			localGatewayDisabled: false,
			proxyEnabled: false,
			optinModalDismissed: false,
			cloudManaged: false,
		});
		setUserPreference(store, { localGatewayDisabled: false });
	});

	afterEach(() => {
		// The countdown-baseline test installs fake timers; restore real ones.
		vi.useRealTimers();
	});

	it('clears stale command state while fetching a new setup command', async () => {
		// Deferred promise: keep the request pending so we can observe the
		// state *during* the fetch, then resolve it manually.
		let resolveRequest: (value: {
			command: string;
			expiresAt: string;
			ttlSeconds: number;
		}) => void = () => {};
		mockCreateGatewayLink.mockReturnValue(
			new Promise((resolve) => {
				resolveRequest = resolve;
			}),
		);
		store.setupCommand = 'old command';
		store.setupCommandExpiresAt = '2026-01-01T00:00:00.000Z';
		store.setupCommandTtlSeconds = 1;
		store.setupCommandFetchedAt = 1;

		const request = store.fetchSetupCommand();

		// While the request is in flight, the stale command must not be shown.
		expect(store.setupCommand).toBeNull();
		expect(store.setupCommandExpiresAt).toBeNull();
		expect(store.setupCommandTtlSeconds).toBeNull();
		expect(store.setupCommandFetchedAt).toBeNull();

		resolveRequest({
			command: 'new command',
			expiresAt: '2026-01-01T00:05:00.000Z',
			ttlSeconds: 300,
		});
		await request;

		expect(store.setupCommand).toBe('new command');
	});

	it('uses the request start time as setup command countdown baseline', async () => {
		vi.useFakeTimers();
		vi.setSystemTime(new Date('2026-01-01T00:00:00.000Z'));
		// Advance the clock 10 s inside the request so start and completion
		// times differ; the baseline must be the start time.
		mockCreateGatewayLink.mockImplementation(async () => {
			vi.setSystemTime(new Date('2026-01-01T00:00:10.000Z'));
			return {
				command: 'command',
				expiresAt: '2026-01-01T00:05:00.000Z',
				ttlSeconds: 300,
			};
		});

		await store.fetchSetupCommand();

		expect(store.setupCommandFetchedAt).toBe(new Date('2026-01-01T00:00:00.000Z').getTime());
	});

	it('clears setup command state on disconnect', async () => {
		mockDisconnectGatewaySession.mockResolvedValue(undefined);
		store.setupCommand = 'old command';
		store.setupCommandExpiresAt = '2026-01-01T00:00:00.000Z';
		store.setupCommandTtlSeconds = 1;
		store.setupCommandFetchedAt = 1;

		await store.disconnectComputerUse();

		expect(store.setupCommand).toBeNull();
		expect(store.setupCommandExpiresAt).toBeNull();
		expect(store.setupCommandTtlSeconds).toBeNull();
		expect(store.setupCommandFetchedAt).toBeNull();
	});
});
});

View File

@ -11,11 +11,25 @@ import ConfirmationFooter from './ConfirmationFooter.vue';
import ConfirmationPreview from './ConfirmationPreview.vue';
import SplitButton from './SplitButton.vue';
type InstanceGatewayResourceDecision = 'denyOnce' | 'allowOnce' | 'allowForSession';

const INSTANCE_GATEWAY_RESOURCE_DECISIONS = [
	'denyOnce',
	'allowOnce',
	'allowForSession',
] as const satisfies readonly InstanceGatewayResourceDecision[];

/**
 * Type guard narrowing an arbitrary string (e.g. an id emitted by a generic
 * dropdown component) to one of the known resource decisions.
 */
function isInstanceGatewayResourceDecision(
	value: string,
): value is InstanceGatewayResourceDecision {
	return INSTANCE_GATEWAY_RESOURCE_DECISIONS.some((decision) => decision === value);
}
const props = defineProps<{
requestId: string;
resource: string;
description: string;
options: string[];
options: InstanceGatewayResourceDecision[];
}>();
const i18n = useI18n();
@ -24,23 +38,21 @@ const rootStore = useRootStore();
const store = useInstanceAiStore();
interface OptionEntry {
decision: string;
decision: InstanceGatewayResourceDecision;
label: string;
}
const DECISION_LABELS: Record<string, string> = {
const DECISION_LABELS: Record<InstanceGatewayResourceDecision, string> = {
allowOnce: i18n.baseText('instanceAi.gatewayConfirmation.allowOnce'),
allowForSession: i18n.baseText('instanceAi.gatewayConfirmation.allowForSession'),
denyOnce: i18n.baseText('instanceAi.gatewayConfirmation.denyOnce'),
};
const KNOWN_DECISIONS = new Set(['allowOnce', 'allowForSession', 'denyOnce']);
function getDecisionLabel(decision: string): string {
return DECISION_LABELS[decision] ?? decision;
function getDecisionLabel(decision: InstanceGatewayResourceDecision): string {
return DECISION_LABELS[decision];
}
function optionEntry(decision: string): OptionEntry {
function optionEntry(decision: InstanceGatewayResourceDecision): OptionEntry {
return { decision, label: getDecisionLabel(decision) };
}
@ -53,17 +65,13 @@ const approvePrimary = computed(() =>
);
const approveDropdownItems = computed(() => {
const items: Array<ActionDropdownItem<string>> = [];
const items: Array<ActionDropdownItem<InstanceGatewayResourceDecision>> = [];
if (props.options.includes('allowForSession'))
items.push({ id: 'allowForSession', label: getDecisionLabel('allowForSession') });
return items;
});
const otherOptions = computed<OptionEntry[]>(() =>
props.options.filter((d) => !KNOWN_DECISIONS.has(d)).map(optionEntry),
);
async function confirm(decision: string) {
async function confirm(decision: InstanceGatewayResourceDecision) {
const tc = store.findToolCallByRequestId(props.requestId);
const inputThreadId = tc?.confirmation?.inputThreadId ?? '';
const eventProps = {
@ -93,16 +101,6 @@ async function confirm(decision: string) {
</div>
<ConfirmationFooter>
<!-- Unknown options not in the standard set -->
<N8nButton
v-for="opt in otherOptions"
:key="opt.decision"
variant="outline"
size="medium"
:label="opt.label"
@click="confirm(opt.decision)"
/>
<!-- Deny side -->
<N8nButton
v-if="denyPrimary"
@ -122,7 +120,7 @@ async function confirm(decision: string) {
data-test-id="gateway-decision-approve"
caret-aria-label="More approve options"
@click="confirm(approvePrimary.decision)"
@select="confirm"
@select="(id: string) => isInstanceGatewayResourceDecision(id) && confirm(id)"
/>
</template>
</ConfirmationFooter>

View File

@ -1,5 +1,5 @@
<script lang="ts" setup>
import { computed, onMounted, ref } from 'vue';
import { computed, onBeforeUnmount, onMounted, ref, watch } from 'vue';
import { N8nHeading, N8nIcon, N8nIconButton, N8nText } from '@n8n/design-system';
import type { IconName } from '@n8n/design-system';
import { useI18n, type BaseTextKey } from '@n8n/i18n';
@ -46,6 +46,29 @@ const osTabs = [
];
// Command rendered in the terminal snippet; shows a generic npx invocation
// as a placeholder until the real setup command arrives from the store.
const displayCommand = computed(() => store.setupCommand ?? 'npx @n8n/computer-use');
// Copying is only enabled once a real setup command is present.
const canCopyCommand = computed(() => store.setupCommand !== null);
// Wall-clock "now" in epoch ms; refreshed by the interval installed in the
// watcher below so the countdown text re-renders every second.
const nowMs = ref(Date.now());
let expiryTimer: ReturnType<typeof setInterval> | null = null;

// Remaining token lifetime in whole seconds, floored at 0, or null while the
// TTL / fetch-start metadata is not yet available from the store.
const tokenExpiresInSeconds = computed(() => {
	if (store.setupCommandTtlSeconds !== null && store.setupCommandFetchedAt !== null) {
		const elapsedSeconds = Math.floor((nowMs.value - store.setupCommandFetchedAt) / 1000);
		return Math.max(0, store.setupCommandTtlSeconds - elapsedSeconds);
	}
	return null;
});

// Localized expiry label: "expired" at exactly 0, otherwise a minute count
// rounded up (never below 1, so we never show "0 minutes" while still valid).
const tokenExpiryText = computed(() => {
	if (tokenExpiresInSeconds.value === null) return null;
	if (tokenExpiresInSeconds.value === 0) {
		return i18n.baseText('instanceAi.welcomeModal.gateway.tokenExpired');
	}
	const minutes = Math.max(1, Math.ceil(tokenExpiresInSeconds.value / 60));
	return i18n.baseText('instanceAi.welcomeModal.gateway.tokenExpiresIn', {
		interpolate: { minutes: String(minutes) },
	});
});
const terminalInstructionsKey = computed(() => {
if (selectedOs.value === 'windows') return 'instanceAi.welcomeModal.gateway.instructions.windows';
@ -112,7 +135,11 @@ function onCommandScroll(e: Event) {
async function copyCommand() {
try {
await navigator.clipboard.writeText(displayCommand.value);
if (tokenExpiresInSeconds.value === 0) {
await store.fetchSetupCommand();
}
if (!store.setupCommand) return;
await navigator.clipboard.writeText(store.setupCommand);
copied.value = true;
setTimeout(() => {
copied.value = false;
@ -127,6 +154,27 @@ async function copyCommand() {
onMounted(() => {
void store.fetchSetupCommand();
});
// (Re)start the 1 s countdown ticker whenever a new fetchedAt/TTL pair lands.
// `immediate: true` covers a command that was fetched before this watcher was
// installed; the previous interval is always cleared first to avoid leaks.
watch(
	() => [store.setupCommandFetchedAt, store.setupCommandTtlSeconds] as const,
	([fetchedAt, ttlSeconds]) => {
		if (expiryTimer) {
			clearInterval(expiryTimer);
			expiryTimer = null;
		}
		// No ticker while either half of the expiry metadata is missing.
		if (!(fetchedAt !== null && ttlSeconds !== null)) return;
		nowMs.value = Date.now();
		expiryTimer = setInterval(() => {
			nowMs.value = Date.now();
		}, 1000);
	},
	{ immediate: true },
);

// Stop ticking and discard the setup command state when the view goes away.
onBeforeUnmount(() => {
	if (expiryTimer) clearInterval(expiryTimer);
	store.clearSetupCommand();
});
</script>
<template>
@ -205,9 +253,18 @@ onMounted(() => {
:class="$style.copyButton"
:aria-label="copyCommandAriaLabel"
data-test-id="computer-use-setup-copy-command"
:disabled="!canCopyCommand"
@click="copyCommand"
/>
</div>
<div :class="$style.commandMeta">
<N8nText v-if="tokenExpiryText" size="small" color="text-light">
{{ tokenExpiryText }}
</N8nText>
<N8nText size="small" color="text-light">
{{ i18n.baseText('instanceAi.welcomeModal.gateway.leadingSpaceHint') }}
</N8nText>
</div>
<div :class="$style.waitingRow">
<N8nIcon icon="spinner" color="primary" spin size="small" />
<span>{{ i18n.baseText('instanceAi.welcomeModal.gateway.waiting') }}</span>
@ -300,6 +357,14 @@ onMounted(() => {
background: var(--color--background--shade-2);
}
.commandMeta {
display: flex;
flex-direction: column;
gap: var(--spacing--5xs);
padding: 0 var(--spacing--xs) var(--spacing--xs);
background: var(--color--background--shade-2);
}
.commandText {
color: var(--color--text--tint-1);
white-space: nowrap;

View File

@ -115,17 +115,21 @@ export async function getInstanceAiCredits(
}
/**
* POST /instance-ai/gateway/create-link -> { token, command }
* POST /instance-ai/gateway/create-link -> { token, command, expiresAt, ttlSeconds }
* Generate a dynamic gateway token and pre-built CLI command.
*/
export async function createGatewayLink(
context: IRestApiContext,
): Promise<{ token: string; command: string }> {
return await makeRestApiRequest<{ token: string; command: string }>(
context,
'POST',
'/instance-ai/gateway/create-link',
);
export async function createGatewayLink(context: IRestApiContext): Promise<{
token: string;
command: string;
expiresAt: string | null;
ttlSeconds: number | null;
}> {
return await makeRestApiRequest<{
token: string;
command: string;
expiresAt: string | null;
ttlSeconds: number | null;
}>(context, 'POST', '/instance-ai/gateway/create-link');
}
/**

View File

@ -6,6 +6,7 @@ import {
isSafeObjectKey,
type InstanceAiConfirmation,
type InstanceAiConfirmRequest,
type InstanceAiResourceDecision,
type InstanceAiAttachment,
type InstanceAiEvent,
type InstanceAiMessage,
@ -794,7 +795,10 @@ export function createThreadRuntime(initialThreadId: string, hooks: ThreadRuntim
}
}
async function confirmResourceDecision(requestId: string, decision: string): Promise<void> {
async function confirmResourceDecision(
requestId: string,
decision: InstanceAiResourceDecision,
): Promise<void> {
resolveConfirmation(requestId, 'approved');
await confirmAction(requestId, { kind: 'resourceDecision', resourceDecision: decision });
}

View File

@ -45,6 +45,10 @@ export const useInstanceAiSettingsStore = defineStore('instanceAiSettings', () =
const HAS_CONNECTED_STORAGE_KEY = 'instanceAi.gateway.hasConnected';
const isDaemonConnecting = ref(false);
// Pre-built gateway CLI setup command, or null while absent/being fetched.
const setupCommand = ref<string | null>(null);
// ISO timestamp the gateway token expires at, as reported by the BE.
const setupCommandExpiresAt = ref<string | null>(null);
// Token time-to-live in seconds, as reported by the BE.
const setupCommandTtlSeconds = ref<number | null>(null);
// Local epoch-ms time captured when the fetch *started*; countdown baseline.
const setupCommandFetchedAt = ref<number | null>(null);
// Monotonic id so responses from superseded/cleared fetches are dropped.
let setupCommandRequestId = 0;
const hasEverConnectedGateway = ref(
typeof localStorage !== 'undefined' &&
@ -297,6 +301,7 @@ export const useInstanceAiSettingsStore = defineStore('instanceAiSettings', () =
);
return;
}
clearSetupCommand();
clearGatewayEverConnected();
gatewayConnected.value = false;
gatewayToolCategories.value = [];
@ -451,11 +456,29 @@ export const useInstanceAiSettingsStore = defineStore('instanceAiSettings', () =
}
}
// Invalidate any in-flight setup-command request and wipe all command state.
// Bumping `setupCommandRequestId` lets a pending `fetchSetupCommand` detect
// that its response is stale and drop it instead of re-populating the refs.
function clearSetupCommand(): void {
	setupCommandRequestId++;
	setupCommand.value = null;
	setupCommandExpiresAt.value = null;
	setupCommandTtlSeconds.value = null;
	setupCommandFetchedAt.value = null;
}
async function fetchSetupCommand(): Promise<void> {
const requestId = ++setupCommandRequestId;
setupCommand.value = null;
setupCommandExpiresAt.value = null;
setupCommandTtlSeconds.value = null;
setupCommandFetchedAt.value = null;
if (isLocalGatewayDisabled.value) return;
const requestStartedAt = Date.now();
try {
const result = await createGatewayLink(rootStore.restApiContext);
if (requestId !== setupCommandRequestId) return;
setupCommand.value = result.command;
setupCommandExpiresAt.value = result.expiresAt;
setupCommandTtlSeconds.value = result.ttlSeconds;
setupCommandFetchedAt.value = requestStartedAt;
} catch {
// Fallback handled in the component
}
@ -512,6 +535,9 @@ export const useInstanceAiSettingsStore = defineStore('instanceAiSettings', () =
// Gateway / daemon
isDaemonConnecting,
setupCommand,
setupCommandExpiresAt,
setupCommandTtlSeconds,
setupCommandFetchedAt,
hasEverConnectedGateway,
isGatewayConnected,
gatewayStatusLoaded,
@ -529,6 +555,7 @@ export const useInstanceAiSettingsStore = defineStore('instanceAiSettings', () =
startGatewayPushListener,
stopGatewayPushListener,
fetchSetupCommand,
clearSetupCommand,
refreshCredentials,
refreshModuleSettings,
// Sidebar connections

View File

@ -18,6 +18,8 @@ import type { LogEntry } from '../logs.types';
import { createTestLogEntry } from '../__test__/mocks';
import { createRunExecutionData, NodeConnectionTypes } from 'n8n-workflow';
import { HTML_NODE_TYPE } from '@/app/constants';
import { MESSAGE_AN_AGENT_NODE_TYPE } from '@/app/constants/nodeTypes';
import { AGENT_SESSION_DETAIL_VIEW } from '@/features/agents/constants';
import { WorkflowIdKey } from '@/app/constants/injectionKeys';
describe('LogDetailsPanel', () => {
@ -65,7 +67,14 @@ describe('LogDetailsPanel', () => {
plugins: [
createRouter({
history: createWebHistory(),
routes: [{ path: '/', component: () => h('div') }],
routes: [
{ path: '/', component: () => h('div') },
{
name: AGENT_SESSION_DETAIL_VIEW,
path: '/projects/:projectId/agents/:agentId/sessions/:threadId',
component: () => h('div'),
},
],
}),
pinia,
],
@ -198,6 +207,85 @@ describe('LogDetailsPanel', () => {
).toBeInTheDocument();
});
// The "View Session" button must appear only for messageAnAgent nodes whose
// run output carries a `session` block (agentId/projectId/sessionId).
describe('messageAnAgent View Session button', () => {
	const messageAgentNode = createTestNode({
		name: 'Message an Agent',
		type: MESSAGE_AN_AGENT_NODE_TYPE,
	});
	// Successful run whose first output item includes the session identifiers
	// the link composable needs to build the agent-session route.
	const messageAgentRunData = createTestTaskData({
		executionStatus: 'success',
		data: {
			main: [
				[
					{
						json: {
							response: 'hi',
							session: {
								agentId: 'agent-1',
								projectId: 'project-1',
								sessionId: 'thread-1',
							},
						},
					},
				],
			],
		},
	});
	const baseProps = {
		isOpen: true,
		panels: LOG_DETAILS_PANEL_STATE.BOTH,
		collapsingInputTableColumnName: null,
		collapsingOutputTableColumnName: null,
		isHeaderClickable: true,
	};

	it('renders a View Session button when run output carries a session block', () => {
		const rendered = render({
			...baseProps,
			logEntry: createLogEntry({
				node: messageAgentNode,
				runIndex: 0,
				runData: messageAgentRunData,
				execution: createRunExecutionData({
					resultData: { runData: { 'Message an Agent': [messageAgentRunData] } },
				}),
			}),
		});

		expect(rendered.queryByTestId('log-details-view-agent-session')).toBeInTheDocument();
	});

	it('does not render the button for nodes that are not messageAnAgent', () => {
		const rendered = render({
			...baseProps,
			logEntry: createLogEntry({ node: aiNode, runIndex: 0, runData: aiNodeRunData }),
		});

		expect(rendered.queryByTestId('log-details-view-agent-session')).not.toBeInTheDocument();
	});

	it('does not render the button when the session block is missing', () => {
		// Same node type, but the output json has no `session` key.
		const noSessionRunData = createTestTaskData({
			executionStatus: 'success',
			data: { main: [[{ json: { response: 'hi' } }]] },
		});
		const rendered = render({
			...baseProps,
			logEntry: createLogEntry({
				node: messageAgentNode,
				runIndex: 0,
				runData: noSessionRunData,
				execution: createRunExecutionData({
					resultData: { runData: { 'Message an Agent': [noSessionRunData] } },
				}),
			}),
		});

		expect(rendered.queryByTestId('log-details-view-agent-session')).not.toBeInTheDocument();
	});
});
it('should render output data in HTML mode for HTML node', async () => {
const nodeA = createTestNode({ name: 'A' });
const nodeB = createTestNode({

View File

@ -26,7 +26,8 @@ import { useExecutionRedaction } from '@/features/execution/executions/composabl
import { useUIStore } from '@/app/stores/ui.store';
import { WORKFLOW_SETTINGS_MODAL_KEY } from '@/app/constants/modals';
import RedactedDataState from '@/features/ndv/panel/components/RedactedDataState.vue';
import { N8nButton, N8nResizeWrapper, N8nText } from '@n8n/design-system';
import { N8nButton, N8nIcon, N8nResizeWrapper, N8nText } from '@n8n/design-system';
import { useMessageAgentSessionLink } from '@/features/agents/composables/useMessageAgentSessionLink';
const MIN_IO_PANEL_WIDTH = 200;
const {
@ -69,6 +70,7 @@ const { isRedacted, canReveal, isDynamicCredentials, revealData } = useExecution
const type = computed(() => nodeTypeStore.getNodeType(logEntry.node.type));
const consumedTokens = computed(() => getSubtreeTotalConsumedTokens(logEntry, false));
const isTriggerNode = computed(() => type.value?.group.includes('trigger'));
const { link: messageAgentSessionLink } = useMessageAgentSessionLink(computed(() => logEntry));
const container = useTemplateRef<HTMLElement>('container');
const resizer = useResizablePanel('N8N_LOGS_INPUT_PANEL_WIDTH', {
container,
@ -127,6 +129,16 @@ function handleResizeEnd() {
</template>
<template #actions>
<div v-if="isOpen && !isTriggerNode && !isPlaceholderLog(logEntry)" :class="$style.actions">
<N8nButton
v-if="messageAgentSessionLink"
variant="subtle"
size="xsmall"
data-test-id="log-details-view-agent-session"
@click.stop="messageAgentSessionLink.open()"
>
<N8nIcon icon="external-link" :class="$style.viewSessionIcon" />
{{ locale.baseText('logs.details.header.actions.viewAgentSession') }}
</N8nButton>
<KeyboardShortcutTooltip
:label="locale.baseText('generic.shortcutHint')"
:shortcut="{ keys: ['i'] }"
@ -255,6 +267,10 @@ function handleResizeEnd() {
margin-right: var(--spacing--2xs);
}
.viewSessionIcon {
margin-right: var(--spacing--3xs);
}
.executionSummary {
flex-shrink: 1;
}

View File

@ -76,7 +76,7 @@ const headers = computed<Array<TableHeader<EncryptionKey>>>(() => [
minWidth: 220,
},
{
title: i18n.baseText('settings.encryptionKeys.column.type'),
title: i18n.baseText('settings.encryptionKeys.column.status'),
key: 'status',
value: (row) => row.status,
minWidth: 120,

View File

@ -1,5 +1,7 @@
import type { ICredentialType, INodeProperties } from 'n8n-workflow';
const defaultScopes = 'activity:read_all,activity:write';
export class StravaOAuth2Api implements ICredentialType {
name = 'stravaOAuth2Api';
@ -30,19 +32,50 @@ export class StravaOAuth2Api implements ICredentialType {
default: 'https://www.strava.com/oauth/token',
required: true,
},
{
displayName: 'Scope',
name: 'scope',
type: 'hidden',
default: 'activity:read_all,activity:write',
required: true,
},
{
displayName: 'Auth URI Query Parameters',
name: 'authQueryParameters',
type: 'hidden',
default: '',
},
{
displayName: 'Custom Scopes',
name: 'customScopes',
type: 'boolean',
default: false,
description: 'Whether to define custom OAuth2 scopes instead of the defaults',
},
{
displayName:
'The default scopes needed for the node to work are already set. If you change these the node may not function correctly.',
name: 'customScopesNotice',
type: 'notice',
default: '',
displayOptions: {
show: {
customScopes: [true],
},
},
},
{
displayName: 'Enabled Scopes',
name: 'enabledScopes',
type: 'string',
displayOptions: {
show: {
customScopes: [true],
},
},
default: defaultScopes,
description: 'Comma-separated list of Strava OAuth2 scopes to request',
},
{
displayName: 'Scope',
name: 'scope',
type: 'hidden',
default: `={{$self["customScopes"] ? $self["enabledScopes"] : "${defaultScopes}"}}`,
required: true,
},
{
displayName: 'Authentication',
name: 'authentication',

View File

@ -33,9 +33,11 @@ export class TrelloApi implements ICredentialType {
{
displayName: 'OAuth Secret',
name: 'oauthSecret',
type: 'hidden',
type: 'string',
typeOptions: { password: true },
default: '',
description:
'Used to verify webhook authenticity. Found under the API Key tab at trello.com/power-ups/admin.',
},
];

View File

@ -0,0 +1,47 @@
import { StravaOAuth2Api } from '../StravaOAuth2Api.credentials';

describe('StravaOAuth2Api Credential', () => {
	const credential = new StravaOAuth2Api();
	const defaultScopes = 'activity:read_all,activity:write';

	// Shorthand for looking up a credential property by its `name` field.
	const findProperty = (propertyName: string) =>
		credential.properties.find((p) => p.name === propertyName);

	it('should have correct credential metadata', () => {
		expect(credential.name).toBe('stravaOAuth2Api');
		expect(credential.extends).toEqual(['oAuth2Api']);
		expect(findProperty('authUrl')?.default).toBe('https://www.strava.com/oauth/authorize');
		expect(findProperty('accessTokenUrl')?.default).toBe('https://www.strava.com/oauth/token');
	});

	it('should use body authentication', () => {
		const authenticationProperty = findProperty('authentication');
		expect(authenticationProperty?.type).toBe('hidden');
		expect(authenticationProperty?.default).toBe('body');
	});

	it('should have custom scopes toggle defaulting to false', () => {
		const customScopesProperty = findProperty('customScopes');
		expect(customScopesProperty?.type).toBe('boolean');
		expect(customScopesProperty?.default).toBe(false);
	});

	it('should have enabledScopes defaulting to the current default scope list', () => {
		expect(findProperty('enabledScopes')?.default).toBe(defaultScopes);
	});

	it('should only show enabledScopes when customScopes is true', () => {
		expect(findProperty('enabledScopes')?.displayOptions?.show?.customScopes).toEqual([true]);
	});

	it('should use enabledScopes when customScopes is true, otherwise fall back to defaults', () => {
		const scopeProperty = findProperty('scope');
		expect(scopeProperty?.type).toBe('hidden');
		expect(scopeProperty?.default).toBe(
			`={{$self["customScopes"] ? $self["enabledScopes"] : "${defaultScopes}"}}`,
		);
	});
});

View File

@ -60,6 +60,19 @@ export const spaceIdProperty: INodeProperties = {
'Space resource name, in the form "spaces/*". Example: spaces/AAAAMpdlehY. Choose from the list, or specify an ID using an <a href="https://docs.n8n.io/code/expressions/">expression</a>.',
};
// Description of a fully-qualified message resource name, shared by every
// operation that addresses a single message.
const messageResourceNameDescription =
	'Resource name of the message. Format: spaces/{space}/messages/{message}. For system-assigned IDs, use the full message name, such as spaces/AAAAAAAAAAA/messages/BBBBBBBBBBB.BBBBBBBBBBB. For custom IDs, use spaces/AAAAAAAAAAA/messages/client-custom-name.';

// Base definition of the `messageId` field; spread into the delete/get/update
// entries below, with only `displayOptions` differing per operation.
const messageResourceNameProperties: INodeProperties = {
	displayName: 'Message Resource Name',
	name: 'messageId',
	type: 'string',
	required: true,
	default: '',
	placeholder: 'e.g. spaces/AAAAAAAAAAA/messages/BBBBBBBBBBB.BBBBBBBBBBB',
	description: messageResourceNameDescription,
};
export const messageFields: INodeProperties[] = [
/* -------------------------------------------------------------------------- */
/* message:create */
@ -223,54 +236,39 @@ export const messageFields: INodeProperties[] = [
/* messages:delete */
/* -------------------------------------------------------------------------- */
{
displayName: 'Message ID',
name: 'messageId',
type: 'string',
required: true,
...messageResourceNameProperties,
displayOptions: {
show: {
resource: ['message'],
operation: ['delete'],
},
},
default: '',
description: 'Resource name of the message to be deleted, in the form "spaces//messages/"',
},
/* -------------------------------------------------------------------------- */
/* message:get */
/* -------------------------------------------------------------------------- */
{
displayName: 'Message ID',
name: 'messageId',
type: 'string',
required: true,
...messageResourceNameProperties,
displayOptions: {
show: {
resource: ['message'],
operation: ['get'],
},
},
default: '',
description: 'Resource name of the message to be retrieved, in the form "spaces//messages/"',
},
/* -------------------------------------------------------------------------- */
/* message:update */
/* -------------------------------------------------------------------------- */
{
displayName: 'Message ID',
name: 'messageId',
type: 'string',
required: true,
...messageResourceNameProperties,
displayOptions: {
show: {
resource: ['message'],
operation: ['update'],
},
},
default: '',
description: 'Resource name of the message to be updated, in the form "spaces//messages/"',
},
{
displayName: 'JSON Parameters',

View File

@ -693,7 +693,15 @@ export class HttpRequestV1 implements INodeType {
const parametersAreJson = this.getNodeParameter('jsonParameters', itemIndex);
const options = this.getNodeParameter('options', itemIndex, {});
const url = this.getNodeParameter('url', itemIndex) as string;
const url = this.getNodeParameter('url', itemIndex);
if (typeof url !== 'string') {
const actualType = url === null ? 'null' : typeof url;
throw new NodeOperationError(
this.getNode(),
`URL parameter must be a string, got ${actualType}`,
);
}
if (!url.startsWith('http://') && !url.startsWith('https://')) {
throw new NodeOperationError(

View File

@ -740,7 +740,15 @@ export class HttpRequestV2 implements INodeType {
const parametersAreJson = this.getNodeParameter('jsonParameters', itemIndex);
const options = this.getNodeParameter('options', itemIndex, {});
const url = this.getNodeParameter('url', itemIndex) as string;
const url = this.getNodeParameter('url', itemIndex);
if (typeof url !== 'string') {
const actualType = url === null ? 'null' : typeof url;
throw new NodeOperationError(
this.getNode(),
`URL parameter must be a string, got ${actualType}`,
);
}
if (!url.startsWith('http://') && !url.startsWith('https://')) {
throw new NodeOperationError(

View File

@ -0,0 +1,70 @@
import type { IExecuteFunctions, INodeTypeBaseDescription } from 'n8n-workflow';

import { HttpRequestV1 } from '../../V1/HttpRequestV1.node';

describe('HttpRequestV1', () => {
	let node: HttpRequestV1;
	let executeFunctions: IExecuteFunctions;

	beforeEach(() => {
		const baseDescription: INodeTypeBaseDescription = {
			displayName: 'HTTP Request',
			name: 'httpRequest',
			description: 'Makes an HTTP request and returns the response data',
			group: [],
		};
		node = new HttpRequestV1(baseDescription);
		// Minimal mocked execution context: only the members execute() touches
		// before the URL check are given behavior; the rest are bare jest.fn().
		executeFunctions = {
			getInputData: jest.fn(() => [{ json: {} }]),
			getNodeParameter: jest.fn(),
			getNode: jest.fn(() => ({
				type: 'n8n-nodes-base.httpRequest',
				typeVersion: 1,
			})),
			getCredentials: jest.fn(),
			helpers: {
				request: jest.fn(),
				requestOAuth1: jest.fn(),
				requestOAuth2: jest.fn(),
				assertBinaryData: jest.fn(),
				getBinaryStream: jest.fn(),
				getBinaryMetadata: jest.fn(),
				binaryToString: jest.fn(),
				prepareBinaryData: jest.fn(),
			},
			getContext: jest.fn(),
			sendMessageToUI: jest.fn(),
			continueOnFail: jest.fn(),
			getMode: jest.fn(),
		} as unknown as IExecuteFunctions;
	});

	describe('URL Parameter Validation', () => {
		// Non-string `url` parameter values must produce a typed error rather
		// than being passed through to the request helper.
		it.each([
			{ url: undefined, expectedType: 'undefined' },
			{ url: null, expectedType: 'null' },
			{ url: 42, expectedType: 'number' },
		])('should throw error when URL is $expectedType', async ({ url, expectedType }) => {
			(executeFunctions.getNodeParameter as jest.Mock).mockImplementation((paramName: string) => {
				switch (paramName) {
					case 'responseFormat':
						return 'json';
					case 'requestMethod':
						return 'GET';
					case 'jsonParameters':
						return false;
					case 'options':
						return {};
					case 'url':
						return url;
					default:
						return undefined;
				}
			});

			await expect(node.execute.call(executeFunctions)).rejects.toThrow(
				`URL parameter must be a string, got ${expectedType}`,
			);
		});
	});
});

View File

@ -160,4 +160,36 @@ describe('HttpRequestV2', () => {
},
);
});
describe('URL Parameter Validation', () => {
it.each([
{ url: undefined, expectedType: 'undefined' },
{ url: null, expectedType: 'null' },
{ url: 42, expectedType: 'number' },
])('should throw error when URL is $expectedType', async ({ url, expectedType }) => {
(executeFunctions.getInputData as jest.Mock).mockReturnValue([{ json: {} }]);
(executeFunctions.getNodeParameter as jest.Mock).mockImplementation((paramName: string) => {
switch (paramName) {
case 'responseFormat':
return 'json';
case 'requestMethod':
return 'GET';
case 'url':
return url;
case 'authentication':
return 'none';
case 'jsonParameters':
return false;
case 'options':
return options;
default:
return undefined;
}
});
await expect(node.execute.call(executeFunctions)).rejects.toThrow(
`URL parameter must be a string, got ${expectedType}`,
);
});
});
});

View File

@ -25,6 +25,13 @@ export class MessageAnAgent implements INodeType {
inputs: [NodeConnectionTypes.Main],
outputs: [NodeConnectionTypes.Main],
properties: [
{
displayName:
'Only published agents are listed below. Publish an agent before referencing it from a workflow.',
name: 'publishedAgentNotice',
type: 'notice',
default: '',
},
{
displayName: 'Agent',
name: 'agentId',
@ -70,6 +77,23 @@ export class MessageAnAgent implements INodeType {
rows: 4,
},
},
{
displayName: 'Advanced',
name: 'advanced',
type: 'collection',
placeholder: 'Add Option',
default: {},
options: [
{
displayName: 'Session ID',
name: 'sessionId',
type: 'string',
default: '',
description:
'Reuse an agent session to keep memory across runs. Leave empty to start a fresh session per execution.',
},
],
},
],
};
@ -116,6 +140,8 @@ export class MessageAnAgent implements INodeType {
};
const agentId = agentIdRlc.value;
const message = this.getNodeParameter('message', i) as string;
const advanced = this.getNodeParameter('advanced', i, {}) as { sessionId?: string };
const sessionIdOverride = advanced.sessionId?.trim();
if (!message.trim()) {
throw new NodeOperationError(this.getNode(), 'Message cannot be empty', {
@ -123,7 +149,12 @@ export class MessageAnAgent implements INodeType {
});
}
const result = await this.executeAgent({ agentId }, message, executionId, i);
const result = await this.executeAgent(
{ agentId, sessionId: sessionIdOverride || undefined },
message,
executionId,
i,
);
returnData.push({
json: {
@ -132,6 +163,7 @@ export class MessageAnAgent implements INodeType {
usage: result.usage as unknown as IDataObject,
toolCalls: result.toolCalls as unknown as IDataObject[],
finishReason: result.finishReason,
session: result.session as unknown as IDataObject,
},
pairedItem: { item: i },
});

View File

@ -8,6 +8,12 @@ describe('MessageAnAgent Node', () => {
let node: MessageAnAgent;
let executeFunctions: jest.Mocked<IExecuteFunctions>;
const mockSession = {
agentId: 'agent-1',
projectId: 'project-1',
sessionId: 'exec-123-0',
};
const mockAgentResult: ExecuteAgentData = {
response: 'Hello from agent',
structuredOutput: null,
@ -18,6 +24,7 @@ describe('MessageAnAgent Node', () => {
},
toolCalls: [],
finishReason: 'stop',
session: mockSession,
};
beforeEach(() => {
@ -39,17 +46,20 @@ describe('MessageAnAgent Node', () => {
it('should send a message and return the agent response', async () => {
executeFunctions.getInputData.mockReturnValue([{ json: {} }]);
executeFunctions.getNodeParameter.mockImplementation((param: string) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return 'Hello agent';
return undefined;
});
executeFunctions.getNodeParameter.mockImplementation(
(param: string, _itemIndex?: number, fallback?: unknown) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return 'Hello agent';
if (param === 'advanced') return fallback ?? {};
return undefined;
},
);
executeFunctions.executeAgent.mockResolvedValue(mockAgentResult);
const result = await node.execute.call(executeFunctions);
expect(executeFunctions.executeAgent).toHaveBeenCalledWith(
{ agentId: 'agent-1' },
{ agentId: 'agent-1', sessionId: undefined },
'Hello agent',
'exec-123',
0,
@ -63,6 +73,7 @@ describe('MessageAnAgent Node', () => {
usage: { promptTokens: 10, completionTokens: 20, totalTokens: 30 },
toolCalls: [],
finishReason: 'stop',
session: mockSession,
},
pairedItem: { item: 0 },
},
@ -70,13 +81,56 @@ describe('MessageAnAgent Node', () => {
]);
});
it('should throw NodeOperationError when message is empty', async () => {
it('should forward a user-supplied sessionId from the Advanced collection', async () => {
executeFunctions.getInputData.mockReturnValue([{ json: {} }]);
executeFunctions.getNodeParameter.mockImplementation((param: string) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return ' ';
return undefined;
if (param === 'message') return 'Hello agent';
if (param === 'advanced') return { sessionId: ' thread-42 ' };
return undefined as unknown as string;
});
executeFunctions.executeAgent.mockResolvedValue(mockAgentResult);
await node.execute.call(executeFunctions);
expect(executeFunctions.executeAgent).toHaveBeenCalledWith(
{ agentId: 'agent-1', sessionId: 'thread-42' },
'Hello agent',
'exec-123',
0,
);
});
it('should treat a whitespace-only sessionId as no override', async () => {
executeFunctions.getInputData.mockReturnValue([{ json: {} }]);
executeFunctions.getNodeParameter.mockImplementation((param: string) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return 'Hello agent';
if (param === 'advanced') return { sessionId: ' ' };
return undefined as unknown as string;
});
executeFunctions.executeAgent.mockResolvedValue(mockAgentResult);
await node.execute.call(executeFunctions);
expect(executeFunctions.executeAgent).toHaveBeenCalledWith(
{ agentId: 'agent-1', sessionId: undefined },
'Hello agent',
'exec-123',
0,
);
});
it('should throw NodeOperationError when message is empty', async () => {
executeFunctions.getInputData.mockReturnValue([{ json: {} }]);
executeFunctions.getNodeParameter.mockImplementation(
(param: string, _itemIndex?: number, fallback?: unknown) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return ' ';
if (param === 'advanced') return fallback ?? {};
return undefined;
},
);
executeFunctions.continueOnFail.mockReturnValue(false);
await expect(node.execute.call(executeFunctions)).rejects.toThrow(NodeOperationError);
@ -85,11 +139,14 @@ describe('MessageAnAgent Node', () => {
it('should process multiple items with different itemIndex values', async () => {
executeFunctions.getInputData.mockReturnValue([{ json: {} }, { json: {} }]);
executeFunctions.getNodeParameter.mockImplementation((param: string, itemIndex: number) => {
if (param === 'agentId') return { mode: 'id', value: `agent-${itemIndex + 1}` };
if (param === 'message') return `Message ${itemIndex + 1}`;
return undefined;
});
executeFunctions.getNodeParameter.mockImplementation(
(param: string, itemIndex?: number, fallback?: unknown) => {
if (param === 'agentId') return { mode: 'id', value: `agent-${(itemIndex ?? 0) + 1}` };
if (param === 'message') return `Message ${(itemIndex ?? 0) + 1}`;
if (param === 'advanced') return fallback ?? {};
return undefined;
},
);
const resultForItem0: ExecuteAgentData = {
...mockAgentResult,
@ -108,13 +165,13 @@ describe('MessageAnAgent Node', () => {
expect(executeFunctions.executeAgent).toHaveBeenCalledTimes(2);
expect(executeFunctions.executeAgent).toHaveBeenCalledWith(
{ agentId: 'agent-1' },
{ agentId: 'agent-1', sessionId: undefined },
'Message 1',
'exec-123',
0,
);
expect(executeFunctions.executeAgent).toHaveBeenCalledWith(
{ agentId: 'agent-2' },
{ agentId: 'agent-2', sessionId: undefined },
'Message 2',
'exec-123',
1,
@ -128,11 +185,14 @@ describe('MessageAnAgent Node', () => {
it('should return error item instead of throwing when continueOnFail is true', async () => {
executeFunctions.getInputData.mockReturnValue([{ json: {} }]);
executeFunctions.getNodeParameter.mockImplementation((param: string) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return 'Hello';
return undefined;
});
executeFunctions.getNodeParameter.mockImplementation(
(param: string, _itemIndex?: number, fallback?: unknown) => {
if (param === 'agentId') return { mode: 'id', value: 'agent-1' };
if (param === 'message') return 'Hello';
if (param === 'advanced') return fallback ?? {};
return undefined;
},
);
executeFunctions.continueOnFail.mockReturnValue(true);
executeFunctions.executeAgent.mockRejectedValue(new Error('Agent unavailable'));
@ -156,11 +216,14 @@ describe('MessageAnAgent Node', () => {
};
executeFunctions.getInputData.mockReturnValue([{ json: {} }]);
executeFunctions.getNodeParameter.mockImplementation((param: string) => {
if (param === 'agentId') return { mode: 'list', value: 'agent-1' };
if (param === 'message') return 'Structured query';
return undefined;
});
executeFunctions.getNodeParameter.mockImplementation(
(param: string, _itemIndex?: number, fallback?: unknown) => {
if (param === 'agentId') return { mode: 'list', value: 'agent-1' };
if (param === 'message') return 'Structured query';
if (param === 'advanced') return fallback ?? {};
return undefined;
},
);
executeFunctions.executeAgent.mockResolvedValue(structuredResult);
const result = await node.execute.call(executeFunctions);

View File

@ -8,8 +8,7 @@ import {
} from 'n8n-workflow';
import { apiRequest } from './GenericFunctions';
// import { createHmac } from 'crypto';
import { verifySignature } from './TrelloTriggerHelpers';
export class TrelloTrigger implements INodeType {
description: INodeTypeDescription = {
@ -147,22 +146,17 @@ export class TrelloTrigger implements INodeType {
};
}
const isSignatureValid = await verifySignature.call(this);
if (!isSignatureValid) {
const res = this.getResponseObject();
res.status(401).send('Unauthorized').end();
return {
noWebhookResponse: true,
};
}
const bodyData = this.getBodyData();
// TODO: Check why that does not work as expected even though it gets done as described
// https://developers.trello.com/page/webhooks
//const credentials = await this.getCredentials('trelloApi');
// // Check if the request is valid
// const headerData = this.getHeaderData() as IDataObject;
// const webhookUrl = this.getNodeWebhookUrl('default');
// const checkContent = JSON.stringify(bodyData) + webhookUrl;
// const computedSignature = createHmac('sha1', credentials.oauthSecret as string).update(checkContent).digest('base64');
// if (headerData['x-trello-webhook'] !== computedSignature) {
// // Signature is not valid so ignore call
// return {};
// }
return {
workflowData: [this.helpers.returnJsonArray(bodyData)],
};

View File

@ -0,0 +1,38 @@
import { createHmac } from 'crypto';
import type { IWebhookFunctions } from 'n8n-workflow';
import { verifySignature as verifySignatureGeneric } from '../../utils/webhook-signature-verification';
/**
 * Checks the `x-trello-webhook` header of an incoming Trello webhook call.
 *
 * Trello computes HMAC-SHA1 over (raw request body + callback URL) using the
 * credential's OAuth secret and sends it base64-encoded. When no usable OAuth
 * secret is present on the credential, verification is skipped entirely so
 * that existing secret-less workflows keep running.
 */
export async function verifySignature(this: IWebhookFunctions): Promise<boolean> {
	const credentials = await this.getCredentials('trelloApi');
	const request = this.getRequestObject();
	const oauthSecret = credentials.oauthSecret;
	const secretMissing = typeof oauthSecret !== 'string' || !oauthSecret;

	return verifySignatureGeneric({
		// No secret configured → nothing to verify against; skip the check.
		skipIfNoExpectedSignature: secretMissing,
		getExpectedSignature: () => {
			if (typeof oauthSecret !== 'string' || !oauthSecret || !request.rawBody) {
				return null;
			}

			// Normalize the raw body to a UTF-8 string; anything else is unusable.
			let rawBody: string | null;
			if (Buffer.isBuffer(request.rawBody)) {
				rawBody = request.rawBody.toString('utf-8');
			} else if (typeof request.rawBody === 'string') {
				rawBody = request.rawBody;
			} else {
				rawBody = null;
			}
			if (rawBody === null) {
				return null;
			}

			// Trello signs body + callback URL with HMAC-SHA1, base64-encoded.
			const callbackURL = this.getNodeWebhookUrl('default');
			return createHmac('sha1', oauthSecret)
				.update(rawBody + callbackURL)
				.digest('base64');
		},
		getActualSignature: () => {
			const headerValue = request.header('x-trello-webhook');
			return typeof headerValue === 'string' ? headerValue : null;
		},
	});
}

View File

@ -0,0 +1,76 @@
import { TrelloTrigger } from '../TrelloTrigger.node';
jest.mock('../TrelloTriggerHelpers', () => ({
verifySignature: jest.fn(),
}));
import { verifySignature } from '../TrelloTriggerHelpers';
const mockedVerifySignature = jest.mocked(verifySignature);
describe('TrelloTrigger', () => {
	let trelloTrigger: TrelloTrigger;
	let mockWebhookFunctions: any;
	let mockRes: any;

	beforeEach(() => {
		jest.clearAllMocks();
		trelloTrigger = new TrelloTrigger();
		// Express-style response stub; chained calls return the stub itself.
		mockRes = {
			status: jest.fn().mockReturnThis(),
			send: jest.fn().mockReturnThis(),
			end: jest.fn().mockReturnThis(),
		};
		mockWebhookFunctions = {
			getWebhookName: jest.fn().mockReturnValue('default'),
			getBodyData: jest.fn().mockReturnValue({ action: { type: 'createCard' } }),
			getResponseObject: jest.fn().mockReturnValue(mockRes),
			helpers: {
				returnJsonArray: jest.fn().mockImplementation((data) => [{ json: data }]),
			},
		};
	});

	describe('webhook', () => {
		it('should respond 200 to setup HEAD request', async () => {
			// Trello's registration handshake hits the 'setup' webhook; it must be
			// acknowledged with a bare 200 and no workflow execution.
			mockWebhookFunctions.getWebhookName.mockReturnValue('setup');
			const result = await trelloTrigger.webhook.call(mockWebhookFunctions);
			expect(mockRes.status).toHaveBeenCalledWith(200);
			expect(mockRes.end).toHaveBeenCalled();
			expect(result).toEqual({ noWebhookResponse: true });
		});

		it('should return workflow data when signature is valid', async () => {
			mockedVerifySignature.mockResolvedValue(true);
			const result = await trelloTrigger.webhook.call(mockWebhookFunctions);
			expect(result).toEqual({
				workflowData: [[{ json: { action: { type: 'createCard' } } }]],
			});
		});

		it('should return 401 when signature is invalid', async () => {
			mockedVerifySignature.mockResolvedValue(false);
			const result = await trelloTrigger.webhook.call(mockWebhookFunctions);
			expect(mockRes.status).toHaveBeenCalledWith(401);
			expect(mockRes.send).toHaveBeenCalledWith('Unauthorized');
			expect(mockRes.end).toHaveBeenCalled();
			expect(result).toEqual({ noWebhookResponse: true });
		});

		it('should trigger workflow when no secret is configured', async () => {
			// NOTE(review): verifySignature is mocked here, so this case relies on the
			// helper's own tests to prove it resolves true when no secret exists.
			mockedVerifySignature.mockResolvedValue(true);
			const result = await trelloTrigger.webhook.call(mockWebhookFunctions);
			expect(result).toHaveProperty('workflowData');
		});
	});
});

View File

@ -0,0 +1,114 @@
import { createHmac } from 'crypto';
import { verifySignature } from '../TrelloTriggerHelpers';
describe('TrelloTriggerHelpers', () => {
	let mockWebhookFunctions: any;
	const testSecret = 'test-trello-oauth-secret';
	const testBody = '{"action":{"type":"createCard"},"model":{"id":"abc123"}}';
	const testCallbackUrl = 'https://n8n.example.com/webhook/trello';
	// Trello's scheme: base64(HMAC-SHA1(body + callbackURL)) keyed with the OAuth secret.
	const testSignature = createHmac('sha1', testSecret)
		.update(testBody + testCallbackUrl)
		.digest('base64');

	beforeEach(() => {
		jest.clearAllMocks();
		// Stub of IWebhookFunctions exposing only what verifySignature touches.
		mockWebhookFunctions = {
			getCredentials: jest.fn().mockResolvedValue({
				oauthSecret: testSecret,
			}),
			getRequestObject: jest.fn().mockReturnValue({
				header: jest.fn().mockImplementation((header: string) => {
					if (header === 'x-trello-webhook') return testSignature;
					return null;
				}),
				rawBody: testBody,
			}),
			getNodeWebhookUrl: jest.fn().mockReturnValue(testCallbackUrl),
			getNode: jest.fn().mockReturnValue({ name: 'Trello Trigger' }),
		};
	});

	describe('verifySignature', () => {
		// Backward compatibility: missing/empty secret skips verification entirely.
		it('should return true when no OAuth secret is configured', async () => {
			mockWebhookFunctions.getCredentials.mockResolvedValue({
				apiKey: 'test-key',
				apiToken: 'test-token',
			});
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(true);
		});

		it('should return true when OAuth secret is empty string', async () => {
			mockWebhookFunctions.getCredentials.mockResolvedValue({
				oauthSecret: '',
			});
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(true);
		});

		it('should return true when signature is valid', async () => {
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(true);
		});

		it('should return false when signature is invalid', async () => {
			mockWebhookFunctions.getRequestObject.mockReturnValue({
				header: jest.fn().mockImplementation((header: string) => {
					if (header === 'x-trello-webhook') return 'invalidsignature';
					return null;
				}),
				rawBody: testBody,
			});
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(false);
		});

		it('should return false when signature header is missing', async () => {
			mockWebhookFunctions.getRequestObject.mockReturnValue({
				header: jest.fn().mockReturnValue(null),
				rawBody: testBody,
			});
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(false);
		});

		// rawBody may arrive as a Buffer; the helper must decode it before hashing.
		it('should handle rawBody as Buffer', async () => {
			const bodyBuffer = Buffer.from(testBody);
			const bufferSignature = createHmac('sha1', testSecret)
				.update(testBody + testCallbackUrl)
				.digest('base64');
			mockWebhookFunctions.getRequestObject.mockReturnValue({
				header: jest.fn().mockImplementation((header: string) => {
					if (header === 'x-trello-webhook') return bufferSignature;
					return null;
				}),
				rawBody: bodyBuffer,
			});
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(true);
		});

		// A signature computed against one callback URL must not validate another.
		it('should include callback URL in signature computation', async () => {
			const differentCallbackUrl = 'https://different.example.com/webhook';
			mockWebhookFunctions.getNodeWebhookUrl.mockReturnValue(differentCallbackUrl);
			const result = await verifySignature.call(mockWebhookFunctions);
			expect(result).toBe(false);
		});
	});
});

View File

@ -8,6 +8,7 @@ import {
} from 'n8n-workflow';
import { twilioTriggerApiRequest } from './GenericFunctions';
import { verifySignature } from './TwilioTriggerHelpers';
export class TwilioTrigger implements INodeType {
description: INodeTypeDescription = {
@ -188,6 +189,15 @@ export class TwilioTrigger implements INodeType {
};
async webhook(this: IWebhookFunctions): Promise<IWebhookResponseData> {
const isSignatureValid = await verifySignature.call(this);
if (!isSignatureValid) {
const res = this.getResponseObject();
res.status(401).send('Unauthorized').end();
return {
noWebhookResponse: true,
};
}
const bodyData = this.getBodyData();
return {

View File

@ -0,0 +1,79 @@
import { createHash, createHmac, timingSafeEqual } from 'crypto';
import type { IWebhookFunctions } from 'n8n-workflow';
import { verifySignature as verifySignatureGeneric } from '../../utils/webhook-signature-verification';
/**
 * Verifies a Twilio Event Streams webhook request.
 *
 * Twilio signs JSON webhooks by:
 * 1. Computing SHA-256 of the raw body and appending it as `bodySHA256` query param
 * 2. HMAC-SHA1 over the resulting URL using the account auth token
 * 3. Sending the base64-encoded result in the `X-Twilio-Signature` header
 *
 * Falls back to skip verification when no auth token is available
 * (e.g. credential uses API Key auth) to preserve existing workflows.
 *
 * @returns `true` when the request is authentic (or verification is skipped
 *   for backward compatibility), `false` otherwise; any unexpected error
 *   during verification also yields `false` (fail closed).
 */
export async function verifySignature(this: IWebhookFunctions): Promise<boolean> {
	try {
		const credential = await this.getCredentials<{ authType?: string; authToken?: string }>(
			'twilioApi',
		);
		const req = this.getRequestObject();
		const authToken = credential.authToken;

		// No auth token on the credential (e.g. API Key auth): skip verification
		// so previously working workflows are not broken.
		if (!authToken || typeof authToken !== 'string') {
			return true;
		}

		// Without the raw body we cannot recompute the body hash, so reject.
		const rawBody = req.rawBody;
		if (!rawBody) {
			return false;
		}

		// Step 1: the bodySHA256 query param must match SHA-256 of the raw body.
		// Length is compared before timingSafeEqual because that function throws
		// on inputs of unequal length.
		const bodyBuffer = Buffer.isBuffer(rawBody) ? rawBody : Buffer.from(rawBody);
		const computedBodyHash = createHash('sha256').update(bodyBuffer).digest('hex');
		const bodyHashFromQuery = req.query?.bodySHA256;
		if (
			typeof bodyHashFromQuery !== 'string' ||
			bodyHashFromQuery.length !== computedBodyHash.length ||
			!timingSafeEqual(Buffer.from(bodyHashFromQuery), Buffer.from(computedBodyHash))
		) {
			return false;
		}

		let sinkUrl = this.getNodeWebhookUrl('default');
		if (!sinkUrl) {
			return false;
		}

		const originalUrl: string = req.originalUrl ?? req.url ?? '';
		// getNodeWebhookUrl always returns the production path (/webhook/...).
		// In test mode the request arrives at /webhook-test/..., so adjust
		// the base URL to match what was actually signed against.
		const originalPath = originalUrl.split('?')[0];
		if (originalPath.includes('/webhook-test/')) {
			sinkUrl = sinkUrl.replace('/webhook/', '/webhook-test/');
		}

		// Step 2: rebuild the exact URL Twilio signed — the webhook URL plus the
		// original query string (which includes the bodySHA256 param).
		const queryIdx = originalUrl.indexOf('?');
		const queryString = queryIdx === -1 ? '' : originalUrl.substring(queryIdx + 1);
		const signedUrl = queryString ? `${sinkUrl}?${queryString}` : sinkUrl;

		return verifySignatureGeneric({
			getExpectedSignature: () => {
				const hmac = createHmac('sha1', authToken);
				hmac.update(signedUrl);
				return hmac.digest('base64');
			},
			getActualSignature: () => {
				const sig = req.header('x-twilio-signature');
				return typeof sig === 'string' ? sig : null;
			},
		});
	} catch (error) {
		// Fail closed: any unexpected error during verification rejects the request.
		return false;
	}
}

View File

@ -0,0 +1,83 @@
import type { IWebhookFunctions } from 'n8n-workflow';
import { TwilioTrigger } from '../TwilioTrigger.node';
import { verifySignature } from '../TwilioTriggerHelpers';
jest.mock('../TwilioTriggerHelpers');
describe('TwilioTrigger', () => {
	let trigger: TwilioTrigger;
	// Only the members exercised by webhook() are mocked, hence the Pick.
	let mockWebhookFunctions: Pick<
		jest.Mocked<IWebhookFunctions>,
		'getBodyData' | 'getResponseObject' | 'helpers'
	>;

	beforeEach(() => {
		jest.clearAllMocks();
		trigger = new TwilioTrigger();
		mockWebhookFunctions = {
			getBodyData: jest.fn(),
			getResponseObject: jest.fn(),
			helpers: {
				returnJsonArray: jest.fn((data) => data),
			} as any,
		};
	});

	describe('webhook', () => {
		it('should process the webhook when signature verification passes', async () => {
			const bodyData = [
				{ specversion: '1.0', type: 'com.twilio.messaging.inbound-message.received' },
			];
			(verifySignature as jest.Mock).mockResolvedValue(true);
			mockWebhookFunctions.getBodyData.mockReturnValue(bodyData as any);

			const result = await trigger.webhook.call(
				mockWebhookFunctions as unknown as IWebhookFunctions,
			);

			expect(verifySignature).toHaveBeenCalled();
			expect(result.workflowData).toBeDefined();
			expect(mockWebhookFunctions.helpers.returnJsonArray).toHaveBeenCalledWith(bodyData);
		});

		it('should return 401 when signature verification fails', async () => {
			// Express-style response stub; chained calls return the stub itself.
			const mockResponse = {
				status: jest.fn().mockReturnThis(),
				send: jest.fn().mockReturnThis(),
				end: jest.fn(),
			};
			(verifySignature as jest.Mock).mockResolvedValue(false);
			mockWebhookFunctions.getResponseObject.mockReturnValue(mockResponse as any);

			const result = await trigger.webhook.call(
				mockWebhookFunctions as unknown as IWebhookFunctions,
			);

			expect(verifySignature).toHaveBeenCalled();
			expect(mockResponse.status).toHaveBeenCalledWith(401);
			expect(mockResponse.send).toHaveBeenCalledWith('Unauthorized');
			expect(mockResponse.end).toHaveBeenCalled();
			expect(result).toEqual({ noWebhookResponse: true });
			// Rejected requests must not reach the body-parsing / workflow path.
			expect(mockWebhookFunctions.getBodyData).not.toHaveBeenCalled();
		});

		it('should process the webhook when no auth token is configured (backward compat)', async () => {
			// NOTE(review): verifySignature is mocked here, so the no-token skip
			// behavior itself is covered by the helper's own tests.
			const bodyData = [
				{ specversion: '1.0', type: 'com.twilio.voice.insights.call-summary.complete' },
			];
			(verifySignature as jest.Mock).mockResolvedValue(true);
			mockWebhookFunctions.getBodyData.mockReturnValue(bodyData as any);

			const result = await trigger.webhook.call(
				mockWebhookFunctions as unknown as IWebhookFunctions,
			);

			expect(result.workflowData).toBeDefined();
		});
	});
});

Some files were not shown because too many files have changed in this diff Show More