fix(core): Defer credential setup during workflow builds (#30181)

This commit is contained in:
Albert Alises 2026-05-11 17:46:44 +02:00 committed by GitHub
parent 9072ee3beb
commit bb73952fcc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
23 changed files with 1782 additions and 385 deletions

View File

@ -297,7 +297,6 @@ export async function buildInProcess(
builderTools['submit-workflow'] = createSubmitWorkflowTool(
services.context,
builderWs.workspace,
undefined,
async (attempt) => {
await workflowTaskService.reportBuildOutcome(
toWorkflowBuildOutcome(workItemId, runId, taskId, attempt),
@ -357,7 +356,7 @@ export async function buildInProcess(
},
control: {
mode: 'auto',
waitForConfirmation: async (requestId): Promise<Record<string, unknown>> => {
waitForConfirmation: async (requestId: string): Promise<Record<string, unknown>> => {
interactivity.autoApprovedSuspensions++;
traceCollector.markAutoApproved(requestId);
chunkLog?.write({ kind: 'auto-approve', requestId });

View File

@ -122,16 +122,20 @@ describe('getSystemPrompt', () => {
});
describe('post-build verify for bypassPlan', () => {
it('instructs the orchestrator to call verify-built-workflow on mockable triggers', () => {
it('uses verificationReadiness as the post-build routing signal', () => {
const prompt = getSystemPrompt({});
expect(prompt).toContain('Post-build flow');
expect(prompt).toContain('verify-built-workflow');
expect(prompt).toContain('outcome.verificationReadiness');
expect(prompt).toContain('outcome.setupRequirement');
expect(prompt).toContain('outcome.verificationReadiness.status === "ready"');
expect(prompt).toContain('outcome.verificationReadiness.status === "needs_setup"');
expect(prompt).toContain('outcome.verificationReadiness.status === "not_verifiable"');
expect(prompt).toContain('outcome.setupRequirement.status === "required"');
expect(prompt).toContain('outcome.triggerNodes');
expect(prompt).toContain('n8n-nodes-base.scheduleTrigger');
expect(prompt).toContain('n8n-nodes-base.webhook');
expect(prompt).toContain('@n8n/n8n-nodes-langchain.chatTrigger');
expect(prompt).toContain('n8n-nodes-base.formTrigger');
expect(prompt).not.toContain('outcome.usesWorkflowPinDataForVerification');
expect(prompt).not.toContain('outcome.verificationPinData');
});
it('reads workflowId/workItemId from the outcome field, not result', () => {
@ -139,24 +143,25 @@ describe('getSystemPrompt', () => {
expect(prompt).toContain('outcome.workflowId');
expect(prompt).toContain('outcome.workItemId');
expect(prompt).toContain('outcome.verification');
expect(prompt).toContain('outcome.verificationReadiness');
expect(prompt).toContain('outcome.setupRequirement');
expect(prompt).toMatch(/result.*only a short text summary/);
});
it('reuses successful structured builder verification evidence instead of re-running verify', () => {
it('reuses deterministic already-verified readiness instead of re-running verify', () => {
const prompt = getSystemPrompt({});
expect(prompt).toContain('successful structured tool evidence');
expect(prompt).toContain('outcome.verificationReadiness.status === "already_verified"');
expect(prompt).toContain('do **not** call `verify-built-workflow` again');
expect(prompt).toContain('Never trust builder prose alone');
});
it('runs verify even when mocked credentials are present', () => {
it('leaves publish dependency ordering to the workflows tool', () => {
const prompt = getSystemPrompt({});
expect(prompt).toMatch(
/Run verify even when `outcome\.mockedCredentialsByNode` is non-empty/,
expect(prompt).toContain(
'Only call `workflows(action="publish")` when the user explicitly asks',
);
expect(prompt).not.toContain('outcome.supportingWorkflowIds');
});
});
@ -170,6 +175,18 @@ describe('getSystemPrompt', () => {
expect(prompt).not.toContain('Always run your own verification');
});
it('routes verified checkpoint workflows with setup needs through workflow setup before completion', () => {
const prompt = getSystemPrompt({});
expect(prompt).toContain('workflows(action="setup")');
expect(prompt).toContain('outcome.setupRequirement.status === "required"');
expect(prompt).toContain('before `complete-checkpoint`');
expect(prompt).toContain('deferred: true');
expect(prompt).toContain(
'Do not call `credentials(action="setup")` or `apply-workflow-credentials`',
);
});
it('tells the orchestrator it may patch during a checkpoint and will re-enter the same checkpoint', () => {
const prompt = getSystemPrompt({});

View File

@ -112,7 +112,7 @@ When \`credentials(action="setup")\` returns \`needsBrowserSetup=true\`, call \`
Never use \`delegate\` to build, patch, fix, or update workflows — delegate does not have access to the builder sandbox, verification, or submit tools.
To edit an existing workflow, call \`build-workflow-with-agent\` directly with \`bypassPlan: true\`, the existing \`workflowId\`, a one-sentence \`reason\`, and a \`task\` spec describing what to change. The orchestrator verifies the result afterwards via \`verify-built-workflow\` when the trigger is mockable (see **Post-build flow**). Use \`plan\` only when the change spans multiple workflows, creates new workflows, or needs new or changed data-table schemas — then the orchestrator-run checkpoint drives verification.
To edit an existing workflow, call \`build-workflow-with-agent\` directly with \`bypassPlan: true\`, the existing \`workflowId\`, a one-sentence \`reason\`, and a \`task\` spec describing what to change. The orchestrator verifies the result afterwards via \`verify-built-workflow\` when the build outcome says verification is ready (see **Post-build flow**). Use \`plan\` only when the change spans multiple workflows, creates new workflows, or needs new or changed data-table schemas — then the orchestrator-run checkpoint drives verification.
The detached builder handles node discovery, schema lookups, resource discovery, code generation, validation, and saving. Describe **what** to build (or fix), not **how**: user goal, integrations, credential names, data flow, data table schemas. Don't specify node types or parameter configurations. Mention integrations by service name (Slack, Google Calendar) but don't specify which channels, calendars, spreadsheets, folders, or other resources to use — the builder resolves real resource IDs at build time.
@ -124,21 +124,22 @@ Always pass \`conversationContext\` when spawning background agents (\`build-wor
**After spawning any background agent** (\`build-workflow-with-agent\`, \`delegate\`, \`plan\`, or \`create-tasks\`): do not write any text. The task card shows the user what's being built or done; restating it (e.g. the workflow name, what the agent will do) is redundant. Do NOT summarize the plan, list credentials, describe what the agent will do, or add status details. The agent's progress is already visible to the user in real time.
**Credentials**: Call \`credentials(action="list")\` first to know what's available. Build the workflow immediately — the builder auto-resolves available credentials and auto-mocks missing ones. Planned builder tasks handle their own verification and credential finalization flow.
**Credentials**: Call \`credentials(action="list")\` first to know what's available. Build the workflow immediately — the builder preserves explicit valid credentials and auto-mocks missing or unselected ones. Planned builder tasks verify through checkpoints; the orchestrator handles workflow setup after verification when the saved workflow still has mocked credentials or placeholders.
**Ask once when a service has multiple credentials of the same type.** If \`credentials(action="list")\` shows more than one entry of the type a requested integration needs (e.g. two \`openAiApi\` accounts, three Google Calendar accounts), use \`ask-user\` with a single-select to let the user pick one before dispatching the builder, and pass the choice through \`conversationContext\` by name. Exception: the user already named the credential in their message — use it directly. With a single candidate, auto-apply and do not ask.
${SECRET_ASK_GUARDRAIL}
**Post-build flow** (for direct \`build-workflow-with-agent\` calls with \`bypassPlan: true\` — plan-driven builds handle their own setup/verify flow via the checkpoint):
**Post-build flow** (for direct \`build-workflow-with-agent\` calls with \`bypassPlan: true\` — checkpoint follow-ups must apply the same setup handoff before completing):
**Publishing is never required for testing.** Both \`executions(action="run")\` and \`verify-built-workflow\` inject \`inputData\` as the trigger's output via the pin-data adapter — the workflow does not need to be active. Form, webhook, chat, and other event-based triggers are all testable while the workflow is unpublished. Never publish a workflow as a precondition for running it.
**Publishing is never required for testing.** Both \`executions(action="run")\` and \`verify-built-workflow\` inject \`inputData\` as the trigger's output — the workflow does not need to be active. Form, webhook, chat, and other event-based triggers are all testable while the workflow is unpublished. Never publish a workflow as a precondition for running it.
1. Builder finishes — read \`outcome.workflowId\`, \`outcome.workItemId\`, \`outcome.triggerNodes\`, and \`outcome.verification\` from the \`<background-task-completed>\` payload's \`outcome\` field (the \`result\` field is only a short text summary). If \`outcome\` is missing, the build did not submit — skip to step 2.
- If \`outcome.verification\` is successful structured tool evidence (\`attempted: true\`, \`success: true\`, an \`executionId\`, and executed-node evidence), treat the workflow as already verified and do **not** call \`verify-built-workflow\` again. Never trust builder prose alone; only reuse the structured \`outcome.verification\` record.
- Otherwise, if any \`outcome.triggerNodes[*].nodeType\` matches \`n8n-nodes-base.scheduleTrigger\`, \`n8n-nodes-base.webhook\`, \`@n8n/n8n-nodes-langchain.chatTrigger\`, or \`n8n-nodes-base.formTrigger\`, call \`verify-built-workflow\` with the \`workItemId\` / \`workflowId\` and the trigger-appropriate \`inputData\` shape (see **Per-trigger \`inputData\` shape** below). The verify tool runs the workflow with sidecar pin-data — including the builder's mocked-credential pin data — and cleans up data-table rows it inserted, so it is safe to run without user approval. Run verify even when \`outcome.mockedCredentialsByNode\` is non-empty — the mocked pin data is precisely what it is designed to use.
- Skip verify only when: \`outcome.workflowId\` or \`outcome.workItemId\` is missing; \`outcome.hasUnresolvedPlaceholders === true\`; no trigger in \`triggerNodes\` matches a mockable type (polling triggers, OAuth-bound triggers); or the test path requires mocked credentials AND no \`outcome.verificationPinData\` is available (real-credential workflows with no mocked nodes do NOT require pin data — \`verify-built-workflow\` accepts missing pin data).
2. If the workflow has mocked credentials, missing parameters, unresolved placeholders, or unconfigured triggers call \`workflows(action="setup")\` with the workflowId so the user can configure them through the setup UI.
1. Builder finishes — read \`outcome.workflowId\`, \`outcome.workItemId\`, \`outcome.triggerNodes\`, \`outcome.verificationReadiness\`, and \`outcome.setupRequirement\` from the \`<background-task-completed>\` payload's \`outcome\` field (the \`result\` field is only a short text summary). If \`outcome\` is missing, explain that the build did not submit.
- If \`outcome.verificationReadiness.status === "already_verified"\`, treat the workflow as verified and do **not** call \`verify-built-workflow\` again.
- If \`outcome.verificationReadiness.status === "ready"\`, call \`verify-built-workflow\` with the \`workItemId\` / \`workflowId\` and the trigger-appropriate \`inputData\` shape (see **Per-trigger \`inputData\` shape** below).
- If \`outcome.verificationReadiness.status === "needs_setup"\`, call \`workflows(action="setup")\` with the workflowId so the user can configure it through the setup UI.
- If \`outcome.verificationReadiness.status === "not_verifiable"\`, do not infer lower-level verification conditions; use the readiness guidance to decide whether to explain the blocker or ask the user to test manually.
2. After verification handling, if \`outcome.setupRequirement.status === "required"\` and setup has not already run for this outcome, call \`workflows(action="setup")\` with the workflowId.
3. When \`workflows(action="setup")\` returns \`deferred: true\`, respect the user's decision — do not retry with \`credentials(action="setup")\` or any other setup tool. The user chose to set things up later.
4. Ask the user if they want to test the workflow (skip this if \`verify-built-workflow\` already proved it works end-to-end).
5. Only call \`workflows(action="publish")\` when the user explicitly asks to publish. Never publish automatically.
@ -221,7 +222,7 @@ When \`<planned-task-follow-up type="synthesize">\` is present, all planned task
When \`<planned-task-follow-up type="replan">\` is present, a planned task failed and the graph is in \`awaiting_replan\`. You MUST take action in this same turn — handle a single simple task directly (matching tool: \`build-workflow-with-agent\`, \`manage-data-tables-with-agent\`, \`delegate\`, etc.), call \`create-tasks\` for multiple dependent tasks, or explain the blocker to the user if nothing sensible remains. Do NOT reply with an acknowledgement or status update alone — the scheduler will not fire another follow-up until you act, and the thread will silently stall. Apply the replan branch from \`## When to Plan\` above.
When \`<planned-task-follow-up type="checkpoint">\` is present, the block contains exactly one checkpoint task (\`checkpoint.id\`, \`checkpoint.title\`, \`checkpoint.instructions\`, and \`checkpoint.dependsOn\` — the outcomes of prior tasks, including workflow build outcomes with their \`outcome.workItemId\` / \`outcome.workflowId\`). **Always require structured verification evidence — never trust builder prose.** If a dependency outcome contains successful \`outcome.verification\` tool evidence (\`attempted: true\`, \`success: true\`, an \`executionId\`, and executed-node evidence), use that evidence and call \`complete-checkpoint(taskId, status: "succeeded", result, outcome)\` without re-running verification. Otherwise execute \`checkpoint.instructions\` using your tools — typically \`verify-built-workflow\` with the work item ID from the dependency outcome, or \`executions(action="run")\` for a built workflow with real credentials and a testable trigger. Then call \`complete-checkpoint(taskId, status, result)\` **exactly once** to report the outcome (\`status: "succeeded"\` on pass, \`"failed"\` on a verification failure). Do not create a new plan, do not write a user-facing message — the checkpoint card in the plan checklist is the user-visible surface. End your turn as soon as \`complete-checkpoint\` returns.
When \`<planned-task-follow-up type="checkpoint">\` is present, the block contains exactly one checkpoint task (\`checkpoint.id\`, \`checkpoint.title\`, \`checkpoint.instructions\`, and \`checkpoint.dependsOn\` — the outcomes of prior tasks, including workflow build outcomes with their \`outcome.workItemId\` / \`outcome.workflowId\`). **Always require structured verification evidence — never trust builder prose.** If a dependency outcome contains successful \`outcome.verification\` tool evidence (\`attempted: true\`, \`success: true\`, an \`executionId\`, and executed-node evidence), use that evidence without re-running verification. Otherwise execute \`checkpoint.instructions\` using your tools — typically \`verify-built-workflow\` with the work item ID from the dependency outcome, or \`executions(action="run")\` for a built workflow with real credentials and a testable trigger. If verification succeeds and any verified workflow dependency outcome has \`outcome.setupRequirement.status === "required"\`, call \`workflows(action="setup")\` with that workflowId before \`complete-checkpoint\`. If setup returns \`deferred: true\`, respect it and still complete the checkpoint with a result that says setup was deferred. Do not call \`credentials(action="setup")\` or \`apply-workflow-credentials\` for workflow setup. Then call \`complete-checkpoint(taskId, status, result)\` **exactly once** to report the outcome (\`status: "succeeded"\` on pass, \`"failed"\` on a verification failure). Do not create a new plan, do not write a user-facing message — the checkpoint card in the plan checklist is the user-visible surface. End your turn as soon as \`complete-checkpoint\` returns.
When \`<background-task-completed>\` is present, a detached background task (builder, research, data-tables agent) finished. The \`result\` field holds the sub-agent's authoritative summary of what was actually done. **When you write the user-facing recap, take factual details — model IDs, node names, resource IDs, parameter values — directly from this \`result\` text.** Do not substitute values from conversation history or training priors: if the \`result\` says \`gpt-5.4-mini\`, write \`gpt-5.4-mini\`, not "GPT-4o mini" or any other name you associate with the provider. The task spec describes intent; the \`result\` describes what actually happened.

View File

@ -1,7 +1,7 @@
import type { InstanceAiPermissions } from '@n8n/api-types';
import type { InstanceAiContext, CredentialSummary, CredentialDetail } from '../../types';
import { createCredentialsTool } from '../credentials.tool';
import { createCredentialsTool, type CredentialAction } from '../credentials.tool';
// ── Helpers ──────────────────────────────────────────────────────────────────
@ -46,9 +46,97 @@ function resumeCtx(resumeData: {
return { agent: { resumeData, suspend: jest.fn() } } as never;
}
// Narrows an untyped tool handle to the shape exposing its input schema so
// tests can exercise `safeParse` against candidate action payloads.
function getInputSchema(tool: unknown): { safeParse: (input: unknown) => { success: boolean } } {
	const carrier = tool as {
		inputSchema: { safeParse: (input: unknown) => { success: boolean } };
	};
	return carrier.inputSchema;
}
// Reads the human-facing description off an untyped tool handle.
function getDescription(tool: unknown): string {
	const described = tool as { description: string };
	return described.description;
}
// ── Tests ────────────────────────────────────────────────────────────────────
describe('credentials tool', () => {
describe('action filtering', () => {
const builderCredentialActions = [
'list',
'get',
'search-types',
'test',
] as const satisfies readonly CredentialAction[];
it('should support setup by default', () => {
const tool = createCredentialsTool(createMockContext());
const schema = getInputSchema(tool);
expect(
schema.safeParse({
action: 'setup',
credentials: [{ credentialType: 'slackApi', reason: 'Send Slack messages' }],
}).success,
).toBe(true);
expect(getDescription(tool)).toContain('set up new credentials');
});
it('should describe only explicitly allowed actions', () => {
const tool = createCredentialsTool(createMockContext(), {
allowedActions: builderCredentialActions,
descriptionPrefix: 'Inspect credentials during build',
descriptionSuffix: 'Setup is handled after workflow verification.',
});
expect(getDescription(tool)).toContain('Inspect credentials during build');
expect(getDescription(tool)).not.toContain('delete');
expect(getDescription(tool)).not.toContain('set up new credentials');
});
it.each([
[{ action: 'list' }],
[{ action: 'get', credentialId: 'cred-1' }],
[{ action: 'search-types', query: 'slack' }],
[{ action: 'test', credentialId: 'cred-1' }],
])('should support explicitly allowed action %p', (input) => {
const tool = createCredentialsTool(createMockContext(), {
allowedActions: builderCredentialActions,
});
const schema = getInputSchema(tool);
expect(schema.safeParse(input).success).toBe(true);
});
it.each([
[
{
action: 'setup',
credentials: [{ credentialType: 'slackApi', reason: 'Send Slack messages' }],
},
],
[{ action: 'delete', credentialId: 'cred-1' }],
])('should reject action %p when it is not explicitly allowed', (input) => {
const tool = createCredentialsTool(createMockContext(), {
allowedActions: builderCredentialActions,
});
const schema = getInputSchema(tool);
expect(schema.safeParse(input).success).toBe(false);
});
it('should reject builder-disallowed setup at the schema boundary', () => {
const tool = createCredentialsTool(createMockContext(), {
allowedActions: builderCredentialActions,
});
const schema = getInputSchema(tool);
expect(
schema.safeParse({
action: 'setup',
credentials: [{ credentialType: 'slackApi', reason: 'Send Slack messages' }],
}).success,
).toBe(false);
});
});
// ── list ────────────────────────────────────────────────────────────────
describe('list action', () => {

View File

@ -82,8 +82,8 @@ jest.mock('../workflows/build-workflow.tool', () => ({
}));
jest.mock('../workflows.tool', () => ({
createWorkflowsTool: jest.fn((_context: unknown, scope?: string) => ({
id: scope ? `workflows-${scope}` : 'workflows',
createWorkflowsTool: jest.fn((_context: unknown, options?: unknown) => ({
id: options ? 'workflows-filtered' : 'workflows',
})),
}));
@ -135,7 +135,7 @@ describe('domain tool construction', () => {
const orchestratorTools = createOrchestratorDomainTools(context);
expect(orchestratorTools).toMatchObject({
workflows: { id: 'workflows-orchestrator' },
workflows: { id: 'workflows-filtered' },
executions: { id: 'executions' },
credentials: { id: 'credentials' },
'data-tables': { id: 'data-tables-orchestrator' },
@ -144,6 +144,29 @@ describe('domain tool construction', () => {
nodes: { id: 'nodes-orchestrator' },
'ask-user': { id: 'ask-user' },
});
const { createWorkflowsTool } = jest.requireMock('../workflows.tool');
expect(createWorkflowsTool).toHaveBeenCalledWith(context, {
allowedActions: [
'list',
'get',
'delete',
'unarchive',
'setup',
'publish',
'unpublish',
'list-versions',
'get-version',
'restore-version',
'update-version',
],
});
expect(createWorkflowsTool).toHaveBeenCalledWith(
context,
expect.objectContaining({
allowedActions: expect.not.arrayContaining(['get-as-code']),
}),
);
});
it('includes local MCP server tools in orchestrator domain tools', () => {

View File

@ -2,7 +2,7 @@ import type { InstanceAiPermissions } from '@n8n/api-types';
import type { InstanceAiContext } from '../../types';
import { analyzeWorkflow, applyNodeChanges } from '../workflows/setup-workflow.service';
import { createWorkflowsTool } from '../workflows.tool';
import { createWorkflowsTool, type WorkflowAction } from '../workflows.tool';
// Mock the setup-workflow.service module to avoid pulling in heavy dependencies
jest.mock('../workflows/setup-workflow.service', () => ({
@ -27,7 +27,17 @@ function createMockContext(
userId: 'user-1',
workflowService: {
list: jest.fn(),
get: jest.fn(),
get: jest.fn().mockResolvedValue({
id: 'wf1',
name: 'Test WF',
versionId: 'v1',
activeVersionId: null,
isArchived: false,
createdAt: '2024-01-01',
updatedAt: '2024-01-01',
nodes: [],
connections: {},
}),
getAsWorkflowJSON: jest.fn().mockResolvedValue({
name: 'Test WF',
nodes: [],
@ -78,15 +88,30 @@ function createMockContext(
} as unknown as InstanceAiContext;
}
// Test helper: pulls the input schema out of a loosely-typed tool object so
// action payloads can be validated via `safeParse` without a full tool type.
function getInputSchema(tool: unknown): { safeParse: (input: unknown) => { success: boolean } } {
	type SchemaCarrier = {
		inputSchema: { safeParse: (input: unknown) => { success: boolean } };
	};
	return (tool as SchemaCarrier).inputSchema;
}
// Test helper: returns the tool's description string for prompt-content assertions.
function getDescription(tool: unknown): string {
	return (tool as { description: string }).description;
}
describe('workflows tool', () => {
beforeEach(() => {
jest.clearAllMocks();
});
describe('surface filtering', () => {
it('should support get-as-code on full surface', async () => {
describe('action filtering', () => {
const builderWorkflowActions = [
'list',
'get',
'get-as-code',
] as const satisfies readonly WorkflowAction[];
it('should support get-as-code by default', async () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!(
{ action: 'get-as-code', workflowId: 'w1' } as never,
@ -99,6 +124,68 @@ describe('workflows tool', () => {
code: '// generated code',
});
});
it('should describe only explicitly allowed actions', () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, {
allowedActions: builderWorkflowActions,
descriptionPrefix: 'Inspect workflows during build',
});
expect(getDescription(tool)).toContain('Inspect workflows during build');
expect(getDescription(tool)).not.toContain('set up');
expect(getDescription(tool)).not.toContain('publish');
expect(getDescription(tool)).not.toContain('archive');
});
it.each([
[{ action: 'list' }],
[{ action: 'get', workflowId: 'w1' }],
[{ action: 'get-as-code', workflowId: 'w1' }],
])('should support explicitly allowed action %p', (input) => {
const context = createMockContext();
const tool = createWorkflowsTool(context, {
allowedActions: builderWorkflowActions,
});
const schema = getInputSchema(tool);
expect(schema.safeParse(input).success).toBe(true);
});
it.each([
[{ action: 'setup', workflowId: 'w1' }],
[{ action: 'publish', workflowId: 'w1' }],
[{ action: 'unpublish', workflowId: 'w1' }],
[{ action: 'delete', workflowId: 'w1' }],
[{ action: 'unarchive', workflowId: 'w1' }],
[{ action: 'list-versions', workflowId: 'w1' }],
[{ action: 'get-version', workflowId: 'w1', versionId: 'v1' }],
[{ action: 'restore-version', workflowId: 'w1', versionId: 'v1' }],
[{ action: 'update-version', workflowId: 'w1', versionId: 'v1', name: 'v1' }],
])('should reject action %p when it is not explicitly allowed', (input) => {
const context = createMockContext();
context.workflowService.listVersions = jest.fn();
context.workflowService.getVersion = jest.fn();
context.workflowService.restoreVersion = jest.fn();
context.workflowService.updateVersion = jest.fn();
const tool = createWorkflowsTool(context, {
allowedActions: builderWorkflowActions,
});
const schema = getInputSchema(tool);
expect(schema.safeParse(input).success).toBe(false);
});
it('should reject builder-disallowed publish at the schema boundary', () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, {
allowedActions: builderWorkflowActions,
});
const schema = getInputSchema(tool);
expect(schema.safeParse({ action: 'publish', workflowId: 'w1' }).success).toBe(false);
expect(context.workflowService.publish).not.toHaveBeenCalled();
});
});
describe('version actions', () => {
@ -109,7 +196,7 @@ describe('workflows tool', () => {
context.workflowService.getVersion = jest.fn();
context.workflowService.restoreVersion = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!(
{ action: 'list-versions', workflowId: 'w1' } as never,
{} as never,
@ -127,7 +214,7 @@ describe('workflows tool', () => {
context.workflowService.restoreVersion = jest.fn();
context.workflowService.updateVersion = jest.fn().mockResolvedValue({ success: true });
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!(
{
action: 'update-version',
@ -150,7 +237,7 @@ describe('workflows tool', () => {
context.workflowService.updateVersion = jest.fn().mockResolvedValue({ success: true });
const suspend = jest.fn().mockResolvedValue(undefined);
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!(
{
action: 'update-version',
@ -183,7 +270,7 @@ describe('workflows tool', () => {
});
context.workflowService.updateVersion = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!(
{
action: 'update-version',
@ -202,7 +289,7 @@ describe('workflows tool', () => {
const context = createMockContext();
context.workflowService.updateVersion = jest.fn().mockResolvedValue({ success: true });
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!(
{
action: 'update-version',
@ -221,7 +308,7 @@ describe('workflows tool', () => {
const context = createMockContext();
context.workflowService.updateVersion = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!(
{
action: 'update-version',
@ -257,7 +344,7 @@ describe('workflows tool', () => {
const context = createMockContext();
(context.workflowService.list as jest.Mock).mockResolvedValue(workflows);
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'list', query: 'test', limit: 10 }, {} as never);
expect(context.workflowService.list).toHaveBeenCalledWith({ limit: 10, query: 'test' });
@ -268,7 +355,7 @@ describe('workflows tool', () => {
const context = createMockContext();
(context.workflowService.list as jest.Mock).mockResolvedValue([]);
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'list', status: 'archived' }, {} as never);
expect(context.workflowService.list).toHaveBeenCalledWith({ status: 'archived' });
@ -278,7 +365,7 @@ describe('workflows tool', () => {
const context = createMockContext();
(context.workflowService.list as jest.Mock).mockResolvedValue([]);
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'list', status: 'all' }, {} as never);
expect(context.workflowService.list).toHaveBeenCalledWith({ status: 'all' });
@ -301,7 +388,7 @@ describe('workflows tool', () => {
const context = createMockContext();
(context.workflowService.get as jest.Mock).mockResolvedValue(detail);
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'get', workflowId: 'wf1' }, {} as never);
expect(context.workflowService.get).toHaveBeenCalledWith('wf1');
@ -315,7 +402,7 @@ describe('workflows tool', () => {
permissions: { deleteWorkflow: 'blocked' },
});
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'delete', workflowId: 'wf1' }, {} as never);
expect(result).toEqual({
@ -333,7 +420,7 @@ describe('workflows tool', () => {
});
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'delete', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -356,7 +443,7 @@ describe('workflows tool', () => {
(context.workflowService.get as jest.Mock).mockRejectedValue(new Error('not found'));
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'delete', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -370,7 +457,7 @@ describe('workflows tool', () => {
it('should archive when approved via resume', async () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'delete', workflowId: 'wf1' }, {
agent: { resumeData: { approved: true } },
} as never);
@ -382,7 +469,7 @@ describe('workflows tool', () => {
it('should return denied when user rejects', async () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'delete', workflowId: 'wf1' }, {
agent: { resumeData: { approved: false } },
} as never);
@ -401,7 +488,7 @@ describe('workflows tool', () => {
permissions: { deleteWorkflow: 'blocked' },
});
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'unarchive', workflowId: 'wf1' }, {} as never);
expect(result).toEqual({
@ -420,7 +507,7 @@ describe('workflows tool', () => {
});
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'unarchive', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -448,7 +535,7 @@ describe('workflows tool', () => {
const suspension = { suspended: true };
const suspend = jest.fn().mockResolvedValue(suspension);
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'unarchive', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -460,7 +547,7 @@ describe('workflows tool', () => {
it('should unarchive when approved via resume', async () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'unarchive', workflowId: 'wf1' }, {
agent: { resumeData: { approved: true } },
} as never);
@ -472,7 +559,7 @@ describe('workflows tool', () => {
it('should return denied when user rejects', async () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'unarchive', workflowId: 'wf1' }, {
agent: { resumeData: { approved: false } },
} as never);
@ -492,7 +579,7 @@ describe('workflows tool', () => {
permissions: { publishWorkflow: 'blocked' },
});
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'publish', workflowId: 'wf1' }, {} as never);
expect(result).toEqual({
@ -508,7 +595,7 @@ describe('workflows tool', () => {
activeVersionId: 'v2',
});
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'publish', workflowId: 'wf1' }, {
agent: { resumeData: { approved: true } },
} as never);
@ -516,7 +603,111 @@ describe('workflows tool', () => {
expect(context.workflowService.publish).toHaveBeenCalledWith('wf1', {
versionId: undefined,
});
expect(result).toEqual({ success: true, activeVersionId: 'v2' });
expect(result).toEqual({
success: true,
activeVersionId: 'v2',
publishedWorkflowIds: ['wf1'],
});
});
// Publishing a parent workflow must first publish every workflow referenced by
// its Execute Workflow nodes — deduplicated, in discovery order — before
// publishing the parent itself.
it('should publish direct Execute Workflow dependencies before the main workflow', async () => {
  const context = createMockContext();
  (context.workflowService.getAsWorkflowJSON as jest.Mock).mockResolvedValue({
    name: 'Parent',
    nodes: [
      {
        name: 'Call A',
        type: 'n8n-nodes-base.executeWorkflow',
        parameters: { source: 'database', workflowId: 'sub-a' },
      },
      {
        name: 'Call B',
        type: 'n8n-nodes-base.executeWorkflow',
        // workflowId given as a resource-locator-style object ({ value }) rather than a string
        parameters: { source: 'database', workflowId: { value: 'sub-b' } },
      },
      {
        // Duplicate reference to sub-a: it must only be published once.
        name: 'Call A Again',
        type: 'n8n-nodes-base.executeWorkflow',
        parameters: { source: 'database', workflowId: 'sub-a' },
      },
    ],
    connections: {},
  });
  (context.workflowService.publish as jest.Mock).mockResolvedValue({
    activeVersionId: 'v-main',
  });
  const tool = createWorkflowsTool(context);
  const result = await tool.execute!({ action: 'publish', workflowId: 'wf1' }, {
    agent: { resumeData: { approved: true } },
  } as never);
  // Dependencies first, parent last; only the parent gets the versionId option.
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(1, 'sub-a');
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(2, 'sub-b');
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(3, 'wf1', {
    versionId: undefined,
  });
  expect(result).toEqual({
    success: true,
    activeVersionId: 'v-main',
    publishedWorkflowIds: ['sub-a', 'sub-b', 'wf1'],
    supportingWorkflowIds: ['sub-a', 'sub-b'],
  });
});
// When the main publish throws after dependencies were already published, the
// dependencies are rolled back: unpublished if they had no prior active
// version, or re-published at their previous version if they had one.
it('should roll back direct Execute Workflow dependencies when the main workflow publish fails', async () => {
  const context = createMockContext();
  (context.workflowService.getAsWorkflowJSON as jest.Mock).mockResolvedValue({
    name: 'Parent',
    nodes: [
      {
        name: 'Call A',
        type: 'n8n-nodes-base.executeWorkflow',
        parameters: { source: 'database', workflowId: 'sub-a' },
      },
      {
        name: 'Call B',
        type: 'n8n-nodes-base.executeWorkflow',
        parameters: { source: 'database', workflowId: 'sub-b' },
      },
    ],
    connections: {},
  });
  // sub-a had a previously active version ('sub-a-previous'); sub-b had none.
  (context.workflowService.get as jest.Mock).mockImplementation((workflowId: string) => ({
    id: workflowId,
    name: workflowId,
    versionId: `${workflowId}-draft`,
    activeVersionId: workflowId === 'sub-a' ? 'sub-a-previous' : null,
    isArchived: false,
    createdAt: '2024-01-01',
    updatedAt: '2024-01-01',
    nodes: [],
    connections: {},
  }));
  // Dependencies publish fine; only the main workflow publish fails.
  (context.workflowService.publish as jest.Mock).mockImplementation((workflowId: string) => {
    if (workflowId === 'wf1') throw new Error('Main publish failed');
    return { activeVersionId: `${workflowId}-active` };
  });
  const tool = createWorkflowsTool(context);
  const result = await tool.execute!({ action: 'publish', workflowId: 'wf1' }, {
    agent: { resumeData: { approved: true } },
  } as never);
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(1, 'sub-a');
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(2, 'sub-b');
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(3, 'wf1', {
    versionId: undefined,
  });
  // sub-b (no prior version) is unpublished; sub-a is restored to its previous version.
  expect(context.workflowService.unpublish).toHaveBeenCalledWith('sub-b');
  expect(context.workflowService.publish).toHaveBeenNthCalledWith(4, 'sub-a', {
    versionId: 'sub-a-previous',
  });
  expect(result).toEqual({
    success: false,
    error: 'Main publish failed',
    rolledBackWorkflowIds: ['sub-b', 'sub-a'],
  });
});
it('should suspend for confirmation using the looked-up workflow name', async () => {
@ -527,7 +718,7 @@ describe('workflows tool', () => {
});
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'publish', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -539,6 +730,36 @@ describe('workflows tool', () => {
severity: 'warning',
});
});
// The publish confirmation prompt must mention how many supporting workflows
// (Execute Workflow dependencies) will be published alongside the main one.
it('should include direct Execute Workflow dependencies in publish confirmation', async () => {
  const context = createMockContext();
  (context.workflowService.get as jest.Mock).mockResolvedValue({
    id: 'wf1',
    name: 'My WF',
  });
  (context.workflowService.getAsWorkflowJSON as jest.Mock).mockResolvedValue({
    name: 'Parent',
    nodes: [
      {
        name: 'Call A',
        type: 'n8n-nodes-base.executeWorkflow',
        parameters: { source: 'database', workflowId: 'sub-a' },
      },
    ],
    connections: {},
  });
  const suspend = jest.fn();
  const tool = createWorkflowsTool(context);
  await tool.execute!({ action: 'publish', workflowId: 'wf1' }, {
    agent: { suspend, resumeData: undefined },
  } as never);
  // One referenced sub-workflow → "1 referenced supporting workflow(s)".
  expect(suspend.mock.calls[0][0]).toMatchObject({
    message: 'Publish workflow "My WF" (ID: wf1) and 1 referenced supporting workflow(s)?',
    severity: 'warning',
  });
});
});
describe('setup action', () => {
@ -562,7 +783,7 @@ describe('workflows tool', () => {
const context = createMockContext();
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'setup', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -590,7 +811,7 @@ describe('workflows tool', () => {
const context = createMockContext();
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'setup', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -604,7 +825,7 @@ describe('workflows tool', () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'setup', workflowId: 'wf1' }, {
agent: { resumeData: undefined },
} as never);
@ -636,7 +857,7 @@ describe('workflows tool', () => {
connections: {},
});
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'setup', workflowId: 'wf1' }, {
agent: {
resumeData: {
@ -658,7 +879,7 @@ describe('workflows tool', () => {
});
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'setup', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);
@ -674,7 +895,7 @@ describe('workflows tool', () => {
permissions: { updateWorkflow: 'blocked' },
});
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'setup', workflowId: 'wf1' }, {
agent: {
resumeData: {
@ -694,7 +915,7 @@ describe('workflows tool', () => {
it('should unpublish when approved', async () => {
const context = createMockContext();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
const result = await tool.execute!({ action: 'unpublish', workflowId: 'wf1' }, {
agent: { resumeData: { approved: true } },
} as never);
@ -711,7 +932,7 @@ describe('workflows tool', () => {
});
const suspend = jest.fn();
const tool = createWorkflowsTool(context, 'full');
const tool = createWorkflowsTool(context);
await tool.execute!({ action: 'unpublish', workflowId: 'wf1' }, {
agent: { suspend, resumeData: undefined },
} as never);

View File

@ -125,18 +125,100 @@ const testAction = z.object({
credentialId: credentialIdField,
});
const inputSchema = sanitizeInputSchema(
z.discriminatedUnion('action', [
listAction,
getAction,
deleteAction,
searchTypesAction,
setupAction,
testAction,
]),
);
// Maps each credential action name to its zod input schema, so a tool instance
// can assemble its input schema from an allow-list of actions.
const CREDENTIAL_ACTION_SCHEMAS = {
  list: listAction,
  get: getAction,
  delete: deleteAction,
  'search-types': searchTypesAction,
  setup: setupAction,
  test: testAction,
} as const;
type Input = z.infer<typeof inputSchema>;
// Action names are derived from the schema map so the two cannot drift apart.
export type CredentialAction = keyof typeof CREDENTIAL_ACTION_SCHEMAS;

// One member of the `action`-discriminated union accepted by the tool.
type CredentialActionSchema = z.ZodDiscriminatedUnionOption<'action'>;

/** Options for scoping a credentials tool instance to a subset of actions. */
export interface CredentialsToolOptions {
  // When set, only these actions are exposed; when omitted, all actions are.
  allowedActions?: readonly CredentialAction[];
  // Optional overrides for the generated tool description text.
  descriptionPrefix?: string;
  descriptionSuffix?: string;
}
// Canonical ordering for credential actions; used both to order the resolved
// action list and to compose the human-readable tool description.
const CREDENTIAL_ACTION_ORDER = [
  'list',
  'get',
  'delete',
  'search-types',
  'setup',
  'test',
] as const satisfies readonly CredentialAction[];
// Human-readable labels for each action, joined into the tool description
// shown to the model (see formatActionList / getToolDescription).
const CREDENTIAL_ACTION_LABELS = {
  list: 'list',
  get: 'get',
  delete: 'delete',
  'search-types': 'search available types',
  setup: 'set up new credentials',
  test: 'test connections',
} satisfies Record<CredentialAction, string>;
/**
 * Resolves the effective action list for a tool instance.
 *
 * Without an allow-list every action is enabled; with one, the canonical order
 * is preserved and only allow-listed entries are kept.
 */
function getCredentialActions(options: CredentialsToolOptions): CredentialAction[] {
  const { allowedActions } = options;
  if (!allowedActions) {
    return [...CREDENTIAL_ACTION_ORDER];
  }
  const permitted = new Set<CredentialAction>(allowedActions);
  const resolved: CredentialAction[] = [];
  for (const action of CREDENTIAL_ACTION_ORDER) {
    if (permitted.has(action)) resolved.push(action);
  }
  return resolved;
}
/**
 * Builds the zod input schema for the given actions.
 *
 * Throws for an empty action list (a tool with no callable action is a wiring
 * bug), returns the lone schema directly for a single action, and otherwise
 * wraps the schemas in an `action`-discriminated union.
 */
function createCredentialInputSchema(actions: readonly CredentialAction[]) {
  const schemas: CredentialActionSchema[] = actions.map(
    (action) => CREDENTIAL_ACTION_SCHEMAS[action],
  );
  if (schemas.length === 0) {
    throw new Error('Credentials tool requires at least one allowed action');
  }
  if (schemas.length === 1) {
    return sanitizeInputSchema(schemas[0]);
  }
  // zod's discriminatedUnion requires a tuple of at least two options.
  const unionOptions = schemas as [
    CredentialActionSchema,
    CredentialActionSchema,
    ...CredentialActionSchema[],
  ];
  return sanitizeInputSchema(z.discriminatedUnion('action', unionOptions));
}
// Full union of all per-action input shapes. Kept static (covering every
// action) even though the runtime schema may only allow a subset, so handler
// code works against one stable Input type.
type Input =
  | z.infer<typeof listAction>
  | z.infer<typeof getAction>
  | z.infer<typeof deleteAction>
  | z.infer<typeof searchTypesAction>
  | z.infer<typeof setupAction>
  | z.infer<typeof testAction>;
/** Resolves the enabled actions for `options` and builds their input schema. */
function buildInputSchema(options: CredentialsToolOptions) {
  const enabledActions = getCredentialActions(options);
  return createCredentialInputSchema(enabledActions);
}
/**
 * Renders action labels as an English list: "a", "a and b", or an
 * Oxford-comma list "a, b, and c" for three or more entries.
 */
function formatActionList(actions: readonly CredentialAction[]): string {
  const labels = actions.map((action) => CREDENTIAL_ACTION_LABELS[action]);
  // One or two items need no comma.
  if (labels.length < 3) {
    return labels.join(' and ');
  }
  const head = labels.slice(0, -1);
  const tail = labels.at(-1);
  return `${head.join(', ')}, and ${tail}`;
}
// Builds the tool description from the enabled actions, honouring optional
// prefix/suffix overrides from the wiring site.
// NOTE(review): the default prefix likely ends with a separator (e.g. " — ")
// that this rendering may have dropped — confirm against the repository.
function getToolDescription(options: CredentialsToolOptions): string {
  const actionList = formatActionList(getCredentialActions(options));
  const description = `${options.descriptionPrefix ?? 'Manage credentials'}${actionList}.`;
  return options.descriptionSuffix ? `${description} ${options.descriptionSuffix}` : description;
}
// ── Suspend / resume schemas (superset covering delete + setup) ────────────
@ -345,11 +427,15 @@ async function handleTest(context: InstanceAiContext, input: Extract<Input, { ac
// ── Tool factory ───────────────────────────────────────────────────────────
export function createCredentialsTool(context: InstanceAiContext) {
export function createCredentialsTool(
context: InstanceAiContext,
options: CredentialsToolOptions = {},
) {
const inputSchema = buildInputSchema(options);
return createTool({
id: CREDENTIALS_TOOL_ID,
description:
'Manage credentials — list, get, delete, search available types, set up new credentials, and test connections.',
description: getToolDescription(options),
inputSchema,
suspendSchema,
resumeSchema,

View File

@ -21,16 +21,30 @@ import { createAskUserTool } from './shared/ask-user.tool';
import { createTaskControlTool } from './task-control.tool';
import { createApplyWorkflowCredentialsTool } from './workflows/apply-workflow-credentials.tool';
import { createBuildWorkflowTool } from './workflows/build-workflow.tool';
import { createWorkflowsTool } from './workflows.tool';
import { createWorkflowsTool, type WorkflowAction } from './workflows.tool';
import { createWorkspaceTool } from './workspace.tool';
function hasParseableAttachment(context: InstanceAiContext): boolean {
return context.currentUserAttachments?.some(isParseableAttachment) ?? false;
}
// Workflow actions exposed to the orchestrator agent's workflows tool.
// Code-editing actions (e.g. get-as-code) are not listed here — presumably
// reserved for the builder surface; confirm at the builder wiring site.
const ORCHESTRATOR_WORKFLOW_ACTIONS = [
  'list',
  'get',
  'delete',
  'unarchive',
  'setup',
  'publish',
  'unpublish',
  'list-versions',
  'get-version',
  'restore-version',
  'update-version',
] as const satisfies readonly WorkflowAction[];
/**
* Creates all native n8n domain tools with the full action surface.
* Used for delegate/builder tool resolution sub-agents get unrestricted access.
* Agents with narrower surfaces pass explicit action lists at their wiring sites.
*/
export function createAllTools(context: InstanceAiContext): ToolsInput {
return {
@ -54,7 +68,9 @@ export function createAllTools(context: InstanceAiContext): ToolsInput {
*/
export function createOrchestratorDomainTools(context: InstanceAiContext): ToolsInput {
return {
workflows: createWorkflowsTool(context, 'orchestrator'),
workflows: createWorkflowsTool(context, {
allowedActions: ORCHESTRATOR_WORKFLOW_ACTIONS,
}),
executions: createExecutionsTool(context),
credentials: createCredentialsTool(context),
'data-tables': createDataTablesTool(context, 'orchestrator'),

View File

@ -33,7 +33,10 @@ const {
shouldRecoverSavedWorkflowAfterFailedSubmit,
createBuildWorkflowAgentTool,
buildWarmBuilderFollowUp,
determineSetupRequirement,
determineVerificationReadiness,
mergeLatestVerificationIntoOutcome,
supportingWorkflowIdsFromSubmitAttempts,
} =
// eslint-disable-next-line @typescript-eslint/no-require-imports, @typescript-eslint/consistent-type-imports
require('../build-workflow-agent.tool') as typeof import('../build-workflow-agent.tool');
@ -117,6 +120,7 @@ describe('buildWarmBuilderFollowUp', () => {
expect(briefing).toContain('Do NOT stop after a successful submit without verifying');
expect(briefing).toContain('verify-built-workflow');
expect(briefing).toContain('nodes(action="explore-resources")');
expect(briefing).not.toContain('workflows(action="publish")');
expect(briefing).toContain('<requested-change>');
expect(briefing).toContain('Change the Gmail recipient');
});
@ -175,14 +179,132 @@ describe('mergeLatestVerificationIntoOutcome', () => {
});
});
describe('determineVerificationReadiness', () => {
  // Readiness reports only a status; pin-data internals stay out of the result.
  it('marks a mockable trigger as ready without exposing pin-data details to the prompt', () => {
    expect(
      determineVerificationReadiness({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
        mockedCredentialTypes: ['slackApi'],
        mockedCredentialsByNode: { Slack: ['slackApi'] },
        verificationPinData: { Slack: [{ _mockedCredential: 'slackApi' }] },
      }),
    ).toEqual({ status: 'ready' });
  });

  // Pin data already saved on the workflow substitutes for per-submit
  // verification pin data.
  it('accepts saved workflow pin data as verification support for mocked credentials', () => {
    expect(
      determineVerificationReadiness({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
        mockedCredentialTypes: ['slackApi'],
        mockedCredentialsByNode: { Slack: ['slackApi'] },
        usesWorkflowPinDataForVerification: true,
      }),
    ).toEqual({ status: 'ready' });
  });

  // Two distinct needs_setup reasons: placeholders left unresolved, and mocked
  // credentials with no pin data available for verification.
  it('routes unresolved placeholders and unverifiable mocked credentials to setup', () => {
    expect(
      determineVerificationReadiness({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
        hasUnresolvedPlaceholders: true,
      }),
    ).toMatchObject({
      status: 'needs_setup',
      reason: 'unresolved-placeholders',
    });
    expect(
      determineVerificationReadiness({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
        mockedCredentialTypes: ['slackApi'],
      }),
    ).toMatchObject({
      status: 'needs_setup',
      reason: 'missing-mocked-credential-pin-data',
    });
  });

  // A successful structured verification attempt short-circuits readiness.
  it('marks successful structured verification as already verified', () => {
    expect(
      determineVerificationReadiness({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
        verification: {
          attempted: true,
          success: true,
          executionId: 'exec-1',
          evidence: { nodesExecuted: ['Webhook', 'Slack'] },
        },
      }),
    ).toEqual({ status: 'already_verified' });
  });

  // Triggers that cannot be mocked (e.g. githubTrigger) are not verifiable here.
  it('marks non-mockable triggers as not verifiable by the post-build flow', () => {
    expect(
      determineVerificationReadiness({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Github Trigger', nodeType: 'n8n-nodes-base.githubTrigger' }],
      }),
    ).toMatchObject({
      status: 'not_verifiable',
      reason: 'non-mockable-trigger',
    });
  });
});
describe('determineSetupRequirement', () => {
  // Setup and verification are independent signals: mocked credentials demand
  // setup even when verification pin data makes the workflow verifiable.
  it('requires setup for mocked credentials even when verification can run', () => {
    expect(
      determineSetupRequirement({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
        mockedCredentialTypes: ['slackApi'],
        mockedCredentialsByNode: { Slack: ['slackApi'] },
        verificationPinData: { Slack: [{ _mockedCredential: 'slackApi' }] },
      }),
    ).toMatchObject({
      status: 'required',
      reason: 'mocked-credentials',
    });
  });

  // With no mocked credentials and no placeholders, no setup is needed.
  it('does not require setup when credentials and placeholders are resolved', () => {
    expect(
      determineSetupRequirement({
        submitted: true,
        workflowId: 'workflow-1',
        triggerNodes: [{ nodeName: 'Webhook', nodeType: 'n8n-nodes-base.webhook' }],
      }),
    ).toEqual({ status: 'not_required' });
  });
});
describe('resultFromPostStreamError', () => {
it('preserves the submitted workflow when the stream errors after a successful submit', () => {
const submitAttempts: SubmitWorkflowAttempt[] = [
{
filePath: '/home/daytona/workspace/chunks/fetch-weather.ts',
sourceHash: 'sub',
success: true,
workflowId: 'SUB_123',
},
{
filePath: MAIN_PATH,
sourceHash: 'abc',
success: true,
workflowId: 'WF_123',
referencedWorkflowIds: ['SUB_123'],
},
];
@ -201,6 +323,7 @@ describe('resultFromPostStreamError', () => {
taskId: 'task_test',
workflowId: 'WF_123',
submitted: true,
supportingWorkflowIds: ['SUB_123'],
});
expect(result!.text).toContain('Unauthorized');
});
@ -440,6 +563,57 @@ describe('resultFromPostStreamError', () => {
});
});
describe('supportingWorkflowIdsFromSubmitAttempts', () => {
  // Only successful, non-main attempts referenced by the main workflow count;
  // duplicates are collapsed, failures and unreferenced IDs are excluded, and
  // submit order is preserved.
  it('collects referenced successful non-main workflow IDs once in submit order', () => {
    const submitAttempts: SubmitWorkflowAttempt[] = [
      {
        filePath: '/home/daytona/workspace/chunks/a.ts',
        sourceHash: 'a',
        success: true,
        workflowId: 'SUB_A',
      },
      {
        // Successful but never referenced by the main workflow → excluded.
        filePath: '/home/daytona/workspace/chunks/setup.ts',
        sourceHash: 'setup',
        success: true,
        workflowId: 'SETUP_ONLY',
      },
      {
        filePath: '/home/daytona/workspace/chunks/b.ts',
        sourceHash: 'b',
        success: true,
        workflowId: 'SUB_B',
      },
      {
        // Re-submission of SUB_A → must not appear twice.
        filePath: '/home/daytona/workspace/chunks/a.ts',
        sourceHash: 'a2',
        success: true,
        workflowId: 'SUB_A',
      },
      {
        // Failed attempt → excluded.
        filePath: '/home/daytona/workspace/chunks/failed.ts',
        sourceHash: 'f',
        success: false,
        errors: ['failed'],
      },
      {
        filePath: MAIN_PATH,
        sourceHash: 'main',
        success: true,
        workflowId: 'WF_123',
        referencedWorkflowIds: ['SUB_A', 'SUB_B'],
      },
    ];
    expect(
      supportingWorkflowIdsFromSubmitAttempts(submitAttempts, MAIN_PATH, 'WF_123', [
        'SUB_A',
        'SUB_B',
      ]),
    ).toEqual(['SUB_A', 'SUB_B']);
  });
});
describe('withTerminalLoopState', () => {
it('marks a saved workflow as needing user input when verification is blocked by setup', () => {
const outcome: WorkflowBuildOutcome = {

View File

@ -0,0 +1,178 @@
// Mock heavy Mastra dependencies to inspect the builder agent wiring without
// running an LLM stream.
// Agent constructor is mocked so tests can inspect the tools it was wired
// with; its stream resolves immediately with a fixed text.
jest.mock('@mastra/core/agent', () => ({
  Agent: jest.fn().mockImplementation(() => ({
    __registerMastra: jest.fn(),
    stream: jest.fn().mockResolvedValue({
      fullStream: (async function* () {})(),
      text: Promise.resolve('builder done'),
    }),
  })),
}));
jest.mock('@mastra/core/mastra', () => ({
  Mastra: jest.fn(),
}));
// createTool passes the config straight through so input schemas stay inspectable.
jest.mock('@mastra/core/tools', () => ({
  createTool: jest.fn((config: unknown) => config),
}));
jest.mock('@n8n/workflow-sdk', () => ({
  generateWorkflowCode: jest.fn(() => '// generated code'),
}));
jest.mock('../../../agent/register-with-mastra', () => ({
  registerWithMastra: jest.fn(),
}));
// HITL stream consumption resolves immediately — no LLM stream is run.
jest.mock('../../../stream/consume-with-hitl', () => ({
  consumeStreamWithHitl: jest.fn().mockResolvedValue({
    text: Promise.resolve('builder done'),
    workSummary: {},
  }),
}));
import { Agent } from '@mastra/core/agent';
import { DEFAULT_INSTANCE_AI_PERMISSIONS } from '@n8n/api-types';
import type { InstanceAiContext, OrchestrationContext } from '../../../types';
import { createBuildWorkflowAgentTool } from '../build-workflow-agent.tool';
// Minimal structural view of a zod-like schema: only safeParse is used here.
type ToolSchema = {
  safeParse(input: unknown): { success: boolean };
};

// Shape of a tool as captured from the spawned builder agent's config.
type SpawnedTool = {
  inputSchema?: ToolSchema;
  execute?: (...args: unknown[]) => unknown;
};

// The slice of the Agent constructor argument these tests inspect.
type SpawnedAgentConfig = {
  tools: Record<string, SpawnedTool>;
};

// Narrow view of the build tool: just the execute entry point under test.
type BuildExecutable = {
  execute(input: Record<string, unknown>): Promise<{ result: string; taskId: string }>;
};

// Argument passed to spawnBackgroundTask; `run` is captured so the test can
// drive the background build manually.
type BackgroundTaskInput = {
  run: (
    signal: AbortSignal,
    drainCorrections: () => string[],
    waitForCorrection: () => Promise<void>,
  ) => Promise<unknown>;
};
/**
 * Builds a minimal, fully jest-mocked domain context for the builder wiring
 * tests. Only the service members the wiring touches are stubbed; the cast at
 * the end papers over the rest of the InstanceAiContext surface.
 */
function createDomainContext(): InstanceAiContext {
  const workflowService = {
    list: jest.fn(),
    get: jest.fn(),
    getAsWorkflowJSON: jest.fn(),
    createFromWorkflowJSON: jest.fn(),
    updateFromWorkflowJSON: jest.fn(),
    archive: jest.fn(),
    unarchive: jest.fn(),
    publish: jest.fn(),
    unpublish: jest.fn(),
  };
  const credentialService = {
    list: jest.fn(),
    get: jest.fn(),
    delete: jest.fn(),
    test: jest.fn(),
    searchCredentialTypes: jest.fn(),
  };
  const context = {
    userId: 'test-user',
    permissions: DEFAULT_INSTANCE_AI_PERMISSIONS,
    workflowService,
    executionService: {},
    credentialService,
    nodeService: {},
    dataTableService: {},
  };
  return context as unknown as InstanceAiContext;
}
/**
 * Builds an OrchestrationContext stub around the supplied spawnBackgroundTask
 * mock. Event-bus getters return empty event lists so streams drain cleanly.
 */
function createContext(spawnBackgroundTask: jest.Mock): OrchestrationContext {
  const taskStorage = {
    get: jest.fn(),
    save: jest.fn(),
  };
  const eventBus = {
    publish: jest.fn(),
    subscribe: jest.fn(),
    getEventsAfter: jest.fn(),
    getNextEventId: jest.fn(),
    getEventsForRun: jest.fn().mockReturnValue([]),
    getEventsForRuns: jest.fn().mockReturnValue([]),
  };
  const logger = { info: jest.fn(), warn: jest.fn(), error: jest.fn(), debug: jest.fn() };
  const context = {
    threadId: 'test-thread',
    runId: 'test-run',
    userId: 'test-user',
    orchestratorAgentId: 'test-agent',
    modelId: 'test-model' as OrchestrationContext['modelId'],
    storage: { id: 'test-storage' } as OrchestrationContext['storage'],
    subAgentMaxSteps: 5,
    taskStorage,
    eventBus,
    logger,
    domainContext: createDomainContext(),
    domainTools: {
      'build-workflow': { execute: jest.fn() },
    },
    spawnBackgroundTask,
    abortSignal: new AbortController().signal,
  };
  return context as OrchestrationContext;
}
/**
 * Returns the tools map from the most recent mocked Agent construction.
 * Throws if no builder agent has been constructed yet.
 */
function getSpawnedToolSchemas(): Record<string, SpawnedTool> {
  const constructorCalls = jest.mocked(Agent).mock.calls;
  const lastCall = constructorCalls.at(-1);
  const agentConfig = lastCall?.[0] as SpawnedAgentConfig | undefined;
  if (agentConfig === undefined) {
    throw new Error('Builder agent was not constructed');
  }
  return agentConfig.tools;
}
describe('builder sub-agent tool surface', () => {
  // Snapshot the env flag so the test can toggle it without leaking state.
  const originalPlanGuard = process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN;

  afterEach(() => {
    jest.clearAllMocks();
    if (originalPlanGuard === undefined) {
      delete process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN;
    } else {
      process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN = originalPlanGuard;
    }
  });

  it('spawns the builder with scoped workflow and credential action schemas', async () => {
    process.env.N8N_INSTANCE_AI_ENFORCE_BUILD_VIA_PLAN = 'false';
    // Capture the background run function instead of letting it execute.
    let capturedRun: BackgroundTaskInput['run'] | undefined;
    const spawnBackgroundTask = jest.fn((input: BackgroundTaskInput) => {
      capturedRun = input.run;
      return { status: 'started', taskId: 'build-task', agentId: 'agent-builder' };
    });
    const context = createContext(spawnBackgroundTask);
    const tool = createBuildWorkflowAgentTool(context) as unknown as BuildExecutable;
    await tool.execute({ task: 'Build a Slack notifier' });
    expect(capturedRun).toBeDefined();
    // Drive the captured background task so the builder Agent is constructed.
    await capturedRun?.(
      new AbortController().signal,
      () => [],
      async () => {},
    );
    const tools = getSpawnedToolSchemas();
    // The builder may read workflows as code but must not publish or run setup.
    expect(
      tools.workflows?.inputSchema?.safeParse({ action: 'get-as-code', workflowId: 'w1' }).success,
    ).toBe(true);
    expect(
      tools.workflows?.inputSchema?.safeParse({ action: 'publish', workflowId: 'w1' }).success,
    ).toBe(false);
    expect(
      tools.workflows?.inputSchema?.safeParse({ action: 'setup', workflowId: 'w1' }).success,
    ).toBe(false);
    // The builder may test credentials; credential setup is not on its surface.
    expect(
      tools.credentials?.inputSchema?.safeParse({ action: 'test', credentialId: 'c1' }).success,
    ).toBe(true);
    expect(
      tools.credentials?.inputSchema?.safeParse({
        action: 'setup',
        credentials: [{ credentialType: 'slackApi', reason: 'Send Slack messages' }],
      }).success,
    ).toBe(false);
  });
});

View File

@ -66,4 +66,11 @@ describe('credential guardrail prompts', () => {
expect(prompt).not.toContain('## IMPORTANT: ResourceLocator Parameter Handling');
}
});
it('does not instruct the sandbox builder about publishing when publish is not on its tool surface', () => {
const prompt = createSandboxBuilderAgentPrompt('/tmp/workspace');
expect(prompt).not.toContain('workflows(action="publish")');
expect(prompt).not.toContain('Do NOT publish');
});
});

View File

@ -60,12 +60,11 @@ const NODE_CONFIGURATION_SAFETY_RULES = `## Node Configuration Safety Rules
- Use live \`nodes(action="explore-resources")\` for resource locator, list, and model fields when credentials are available.
- If a configuration is unclear after reading the definition, ask for clarification or use placeholders do not guess.`;
// The AI Agent subnode example below differs by mode:
// tool mode → `newCredential('OpenAI')`
// sandbox → raw `{ id, name }` object (newCredential() serializes to undefined)
function buildBuilderSpecificPatterns(mode: 'tool' | 'sandbox'): string {
const openAiCredExample =
mode === 'sandbox' ? "{ id: 'credId', name: 'OpenAI account' }" : "newCredential('OpenAI')";
// The AI Agent subnode example uses `newCredential()` in both modes. In sandbox
// mode the submit runner preserves unresolved credential slots for
// `submit-workflow`, so the same outlet works there too.
function buildBuilderSpecificPatterns(): string {
const openAiCredExample = "newCredential('OpenAI')";
return `## Critical Patterns (Common Mistakes)
**Pay attention to @builderHint annotations in search results and type definitions** these provide critical guidance on how to correctly configure node parameters. Write them out as notes when reviewing they prevent common configuration mistakes.
@ -273,20 +272,21 @@ ${CONNECTION_CHANGING_PARAMETERS}
${BASELINE_FLOW_CONTROL}`;
}
const BUILDER_SPECIFIC_PATTERNS_TOOL = buildBuilderSpecificPatterns('tool');
const BUILDER_SPECIFIC_PATTERNS_SANDBOX = buildBuilderSpecificPatterns('sandbox');
const BUILDER_SPECIFIC_PATTERNS = buildBuilderSpecificPatterns();
// ── Composed SDK rules from shared + local sources ───────────────────────────
// Sandbox-mode variant of WORKFLOW_RULES: rule 1 (credentials) uses raw {id, name}
// objects because `submit-workflow` runs the code natively via tsx and expects that
// form. Rules 2 and 3 are mode-agnostic and mirror the shared WORKFLOW_RULES.
// Sandbox-mode variant of WORKFLOW_RULES: rule 1 (credentials) keeps the SDK's
// `newCredential()` outlet so unresolved credentials are explicit in code and
// can be mocked by `submit-workflow`. Rules 2 and 3 are mode-agnostic and
// mirror the shared WORKFLOW_RULES.
const SANDBOX_WORKFLOW_RULES = `Follow these rules strictly when generating workflows:
1. **Always use raw credential objects from \`credentials(action="list")\`**
- Wire credentials as \`{ id, name }\` objects returned by \`credentials(action="list")\`
- NEVER use placeholder strings, fake API keys, or hardcoded auth values
- Example: \`credentials: { slackApi: { id: 'yXYBqho73obh58ZS', name: 'Slack Bot' } }\`
1. **Use \`newCredential()\` for authentication**
- If the user selected a specific credential or an existing workflow already has one, wire it as \`newCredential('Credential Name', 'credential-id')\` using the exact ID from \`credentials(action="list")\` or the pre-loaded workflow
- If no exact credential was selected, more than one credential matches, or the service needs a new credential, wire \`newCredential('Suggested Credential Name')\`; \`submit-workflow\` will mock it for verification and the orchestrator will route setup after the build
- NEVER invent credential IDs, placeholder strings, fake API keys, or hardcoded auth values
- Example: \`credentials: { slackApi: newCredential('Slack Bot') }\`
- The key (e.g. \`slackApi\`) is the credential **type** from the node type definition
2. **Trust empty item lists don't synthesize fake items**
@ -309,27 +309,14 @@ const SANDBOX_WORKFLOW_RULES = `Follow these rules strictly when generating work
- Nested control flow is supported: \`ifNode.onTrue(loopBuilder)\`, \`switchNode.onCase(0, loopBuilder)\`, and \`splitInBatches(sib).onEachBatch(ifElseBuilder)\` all compile and wire correctly. Use them when the semantics genuinely call for it, not as a workaround for empty-list handling.`;
function composeSdkRulesAndPatterns(mode: 'tool' | 'sandbox'): string {
// Shared WORKFLOW_SDK_PATTERNS uses `newCredential('X')` throughout. That
// form is correct for tool mode but serializes to undefined in sandbox mode
// (see submit-workflow.tool.ts — `NewCredentialImpl.toJSON() === undefined`).
// Prepend an override note when composing for sandbox so the LLM substitutes
// raw `{id, name}` objects in the shared examples below.
const sandboxOverride =
mode === 'sandbox'
? "> **Sandbox credential override**: The SDK pattern examples below use `newCredential('X')`. " +
"In sandbox mode, replace every `newCredential('X')` with the raw `{ id, name }` object from " +
'`credentials(action="list")`. `newCredential()` serializes to `undefined` in sandbox and will ' +
'silently drop credentials from the saved workflow.'
: null;
return [
SDK_CODE_RULES,
mode === 'sandbox' ? SANDBOX_WORKFLOW_RULES : WORKFLOW_RULES,
...(sandboxOverride ? [sandboxOverride] : []),
'## SDK Patterns Reference\n\n' + WORKFLOW_SDK_PATTERNS,
'## Expression Reference\n\n' + EXPRESSION_REFERENCE,
'## Additional Functions\n\n' + ADDITIONAL_FUNCTIONS,
NODE_CONFIGURATION_SAFETY_RULES,
mode === 'sandbox' ? BUILDER_SPECIFIC_PATTERNS_SANDBOX : BUILDER_SPECIFIC_PATTERNS_TOOL,
BUILDER_SPECIFIC_PATTERNS,
].join('\n\n');
}
@ -360,10 +347,11 @@ ${PLACEHOLDERS_RULE}
Do NOT produce visible output until step 5. All reasoning happens internally.
## Credential Rules (tool mode)
- Always use \`newCredential('Credential Name')\` for credentials, never fake keys or placeholders.
- NEVER use raw credential objects like \`{ id: '...', name: '...' }\` — that form is for sandbox mode only.
- Use \`newCredential('Credential Name', 'credential-id')\` only when the user selected a specific existing credential or the workflow already has one.
- If no exact credential was selected, more than one credential matches, or the service needs a new credential, use \`newCredential('Suggested Credential Name')\`; the build tools mock unresolved credentials for verification.
- NEVER use raw credential objects like \`{ id: '...', name: '...' }\` in tool mode.
- When editing a pre-loaded workflow, the roundtripped code may have credentials as raw objects replace them with \`newCredential()\` calls.
- Unresolved credentials (where the user chose mock data or no credential is available) will be automatically mocked via pinned data at submit time. Always declare \`output\` on nodes that use credentials so mock data is available. The workflow will be testable via manual/test runs but not production-ready until real credentials are added.
- Unresolved credentials (where the user chose mock data, no credential is available, or no explicit selection was made) will be automatically mocked via pinned data at submit time. Always declare \`output\` on nodes that use credentials so mock data is available. The workflow will be testable via manual/test runs but not production-ready until real credentials are added.
${SDK_RULES_AND_PATTERNS_TOOL}
`;
@ -477,7 +465,7 @@ Supported input types: \`string\`, \`number\`, \`boolean\`, \`array\`, \`object\
### Step 2: Submit and test the chunk
1. Write the chunk file, then submit it: \`submit-workflow\` with the chunk file path.
- Sub-workflows with \`executeWorkflowTrigger\` can be tested immediately via \`executions(action="run")\` without publishing. However, they must be **published** via \`workflows(action="publish")\` before the parent workflow can call them in production (trigger-based) executions.
- Sub-workflows with \`executeWorkflowTrigger\` can be tested immediately via \`executions(action="run")\`.
2. Run the chunk: \`executions(action="run")\` with \`inputData\` matching the trigger schema.
- **Webhook workflows**: \`inputData\` IS the request body — do NOT wrap it in \`{ body: ... }\`. The system automatically places \`inputData\` into \`{ headers, query, body: inputData }\`. So to test a webhook expecting \`{ title: "Hello" }\`, pass \`inputData: { title: "Hello" }\`. Inside the workflow, the data arrives at \`$json.body.title\`.
- **Event-based triggers** (e.g. Linear Trigger, GitHub Trigger, Slack Trigger): pass \`inputData\` matching what the trigger would normally emit. The system injects it as the trigger node's output — e.g. \`inputData: { action: "create", data: { id: "123", title: "Test issue" } }\` for a Linear Trigger. No need to rebuild the workflow with a Manual Trigger.
@ -530,14 +518,13 @@ Replace \`CHUNK_WORKFLOW_ID\` with the actual ID returned by \`submit-workflow\`
${PLACEHOLDERS_RULE}
## Setup Workflows (Create Missing Resources)
## Missing Resources
When \`nodes(action="explore-resources")\` returns no results for a required resource:
1. Use \`nodes(action="search")\` and \`nodes(action="type-definition")\` to find the "create" operation for that resource type
2. Build a one-shot setup workflow in \`chunks/setup-<resource>.ts\` using a manual trigger + the create node
3. Submit and run it extract the created resource ID from the execution result
4. Use that real resource ID in the main workflow
1. If the resource can be represented as a user choice, use \`placeholder('Select <resource>')\` and let the setup flow collect it after the build
2. If the user explicitly asked you to create the resource and the node type definition has a safe create operation, build and verify that resource-creation workflow as part of the requested work
3. Otherwise, leave the main workflow as a saved draft and mention the missing resource in the one-line completion summary
**For resources that can't be created via n8n** (e.g., Slack channels, external API resources), explain clearly in your summary what the user needs to create manually and what ID to put where.
@ -555,7 +542,7 @@ ${ASK_USER_FALLBACK}
- You CANNOT find or use n8n API keys they do not exist in the sandbox environment
- Do NOT spend time searching for API keys, config files, environment variables, or process info none of it is accessible
**All interaction with n8n is through the provided tools:** \`submit-workflow\`, \`executions(action="run" | "debug" | "get")\`, \`credentials(action="list" | "test")\`, \`nodes(action="explore-resources")\`, \`workflows(action="publish" | "unpublish")\`, \`data-tables(action="list" | "create" | "schema")\`, etc. These tools communicate with n8n internally — no HTTP required.
**All interaction with n8n is through the provided tools:** \`submit-workflow\`, \`executions(action="run" | "debug" | "get")\`, \`credentials(action="list" | "get" | "search-types" | "test")\`, \`nodes(action="explore-resources")\`, \`workflows(action="list" | "get" | "get-as-code")\`, \`data-tables(action="list" | "create" | "schema")\`, etc. These tools communicate with n8n internally — no HTTP required.
## Sandbox-Specific Rules
@ -565,15 +552,25 @@ ${ASK_USER_FALLBACK}
## Credentials (sandbox mode)
Sandbox mode uses **raw credential objects** (not \`newCredential()\`). Call \`credentials(action="list")\` early. Each credential has an \`id\`, \`name\`, and \`type\`. Wire them into nodes like this:
Sandbox mode uses \`newCredential()\` for authentication. Call \`credentials(action="list")\` early. Each credential has an \`id\`, \`name\`, and \`type\`. Wire selected existing credentials into nodes like this:
\`\`\`typescript
credentials: {
openWeatherMapApi: { id: 'yXYBqho73obh58ZS', name: 'OpenWeatherMap account' }
openWeatherMapApi: newCredential('OpenWeatherMap account', 'yXYBqho73obh58ZS')
}
\`\`\`
The key (\`openWeatherMapApi\`) is the credential **type** from the node type definition. The \`id\` and \`name\` come from \`credentials(action="list")\`.
For credentials that are not selected yet, keep the credential type key and omit the ID:
\`\`\`typescript
credentials: {
openWeatherMapApi: newCredential('OpenWeatherMap account')
}
\`\`\`
The key (\`openWeatherMapApi\`) is the credential **type** from the node type definition. Exact IDs and names come from \`credentials(action="list")\`.
Use the two-argument form only when the user selected the credential, there is exactly one matching credential, or you are preserving a credential already present on an existing workflow. If no exact credential was selected, more than one credential matches, or the service needs a new credential, use \`newCredential('Suggested Credential Name')\`; \`submit-workflow\` mocks it for verification and the orchestrator handles setup after the build.
If the required credential type is not in \`credentials(action="list")\` results, call \`credentials(action="search-types")\` with the service name (e.g. "linear", "notion") to discover available dedicated credential types. Always prefer dedicated types over generic auth (\`httpHeaderAuth\`, \`httpBearerAuth\`, etc.). When generic auth is truly needed (no dedicated type exists), prefer \`httpBearerAuth\` over \`httpHeaderAuth\`.
@ -589,13 +586,12 @@ n8n normalizes column names to snake_case (e.g., \`dayName\` → \`day_name\`).
- **Complex workflows (5+ nodes, 2+ integrations) MUST use the Compositional Workflow Pattern.** Decompose into sub-workflows, test each independently, then compose. Do NOT write everything in a single workflow.
- **If you edit code after submitting, you MUST call \`submit-workflow\` again before doing anything else (verify, run, or finish).** The system tracks file hashes — if the file changed since the last submit, your work is discarded. The sequence is always: edit → submit → then verify/run/finish.
- **Follow the runtime verification instructions in your briefing.** If the briefing says verification is required, do not stop after a successful submit.
- **Do NOT call \`workflows(action="publish")\`.** Publishing is the user's decision after they have tested the workflow. Your job ends at a successful submit.
## Mandatory Process
### For simple workflows (< 5 nodes, single integration):
1. **Discover credentials**: Call \`credentials(action="list")\`. Note each credential's \`id\`, \`name\`, and \`type\`. You'll wire these into nodes as \`credentials: { credType: { id, name } }\`. If a required credential doesn't exist, mention it in your summary.
1. **Discover credentials**: Call \`credentials(action="list")\`. Note each credential's \`id\`, \`name\`, and \`type\`. Use \`newCredential('Name', 'id')\` only for an explicitly selected, exactly matched, or existing workflow credential. For unresolved credentials, use \`newCredential('Suggested Name')\`; \`submit-workflow\` records the mocked credential and the orchestrator routes to setup after verification.
2. **Discover nodes**:
a. If the workflow fits a known category (notification, data_persistence, chatbot, scheduling, data_transformation, data_extraction, document_processing, form_input, content_generation, triage, scraping_and_research), call \`nodes(action="suggested")\` first — it returns curated node recommendations with pattern hints and configuration notes. **Pay attention to the notes** — they prevent common configuration mistakes.
@ -611,11 +607,11 @@ n8n normalizes column names to snake_case (e.g., \`dayName\` → \`day_name\`).
3. **Get node schemas**: Call \`nodes(action="type-definition")\` with ALL the node IDs you need in a single call (up to 5). For nodes with discriminators (from search results), include the \`resource\` and \`operation\` fields. **Read the definitions carefully** — they contain exact parameter names, types, required fields, valid enum values, credential types, displayOptions conditions, and \`@builderHint\` annotations with critical configuration guidance.
**Important**: Only call \`nodes(action="type-definition")\` for nodes you will actually use in the workflow. Do not speculatively fetch definitions "just in case". If a definition returns empty or an error, do not retry — proceed with the information from \`nodes(action="search")\` results instead.
4. **Resolve real resource IDs**: Check the node schemas from step 3 for parameters with \`searchListMethod\` or \`loadOptionsMethod\`. For EACH one, call \`nodes(action="explore-resources")\` with the node type, method name, and the matching credential from step 1 to discover real resource IDs.
4. **Resolve real resource IDs**: Check the node schemas from step 3 for parameters with \`searchListMethod\` or \`loadOptionsMethod\`. For EACH one, call \`nodes(action="explore-resources")\` with the node type, method name, and the matching explicit credential from step 1 to discover real resource IDs.
- **This is mandatory for: calendars, spreadsheets, channels, folders, models, databases, and any other list-based parameter.** Do NOT assume values like "primary", "default", or "General" always look up the real ID.
- **LLM models in particular** (OpenAI, Anthropic, Groq, etc.): always call \`explore-resources\` with the node's \`@searchListMethod\` when a credential for that provider is attached. The live list reflects what the credential can actually access — free/cheap tiers are often limited (e.g. an OpenAI free-tier key may only return \`gpt-5-mini\`). Picking a model ID that the credential can't access produces a broken workflow. The list is sorted newest-first; use the \`@builderHint\` as selection guidance (e.g. "prefer the GPT-5.4 family") over the live results, not as a hard-coded pick.
- Example: Google Calendar's \`calendar\` parameter uses \`searchListMethod: getCalendars\`. Call \`nodes(action="explore-resources")\` with \`methodName: "getCalendars"\` to get the actual calendar ID (e.g., "user@example.com"), not "primary".
- **Never use \`placeholder()\` or fake IDs for discoverable resources.** Create them via a setup workflow instead (see "Setup Workflows" section). For user-provided values, follow the placeholder rules in "SDK Code Rules".
- **Never use fake IDs for discoverable resources.** Use \`placeholder()\` when the user needs to choose or create the resource after the build. For user-provided values, follow the placeholder rules in "SDK Code Rules".
- **If \`explore-resources\` returns more than one match and the user did not name a specific one, use \`placeholder('Select <resource>')\` for that parameter** (e.g. \`placeholder('Select a calendar')\`, \`placeholder('Select a Slack channel')\`). Picking one silently is a guess; the setup wizard surfaces placeholders so the user can choose after the build. Only pick a single match without prompting.
- If the resource can't be created via n8n (e.g., Slack channels), explain clearly in your summary what the user needs to set up.
@ -640,16 +636,16 @@ Follow the **Compositional Workflow Pattern** above. The process becomes:
1. **Discover credentials** (same as above).
2. **Discover nodes and get schemas** (same as above).
3. **Resolve real resource IDs** (same as above call \`nodes(action="explore-resources")\` for EVERY parameter with \`searchListMethod\` or \`loadOptionsMethod\`). Never assume IDs like "primary" or "default". If a resource doesn't exist, build a setup workflow to create it.
3. **Resolve real resource IDs** (same as above call \`nodes(action="explore-resources")\` for EVERY parameter with \`searchListMethod\` or \`loadOptionsMethod\`). Never assume IDs like "primary" or "default". If a resource doesn't exist, use a placeholder unless the user explicitly asked you to create that resource.
4. **Decompose** the workflow into logical chunks. Each chunk is a standalone sub-workflow with 2-4 nodes covering one capability (e.g., "fetch and format weather data", "generate AI recommendation", "store to data table").
5. **For each chunk**:
a. Write the chunk to \`${workspaceRoot}/chunks/<name>.ts\` with an \`executeWorkflowTrigger\` and explicit input schema.
b. Run tsc.
c. Submit the chunk: \`submit-workflow\` with \`filePath\` pointing to the chunk file. Test via \`executions(action="run")\` (no publish needed for manual runs).
c. Submit the chunk: \`submit-workflow\` with \`filePath\` pointing to the chunk file. Test via \`executions(action="run")\`.
d. Fix if needed (max 2 submission fix attempts per chunk).
6. **Write the main workflow** in \`${workspaceRoot}/src/workflow.ts\` that composes chunks via \`executeWorkflow\` nodes, referencing each chunk's workflow ID.
7. **Submit** the main workflow.
8. **Done**: Output ONE sentence summarizing what was built, including the workflow ID and any known issues. Do NOT publish the user will decide when to publish after testing.
8. **Done**: Output ONE sentence summarizing what was built, including the workflow ID and any known issues.
Do NOT produce visible output until the final step. All reasoning happens internally.

View File

@ -47,21 +47,22 @@ import {
createRemediation,
type TriggerType,
type WorkflowBuildOutcome,
type WorkflowSetupRequirement,
type WorkflowVerificationReadiness,
type WorkflowLoopState,
} from '../../workflow-loop';
import type { BuilderWorkspace } from '../../workspace/builder-sandbox-factory';
import { readFileViaSandbox } from '../../workspace/sandbox-fs';
import { getWorkspaceRoot } from '../../workspace/sandbox-setup';
import {
buildCredentialSnapshot,
type CredentialEntry,
type CredentialMap,
} from '../workflows/resolve-credentials';
import { createCredentialsTool, type CredentialAction } from '../credentials.tool';
import { buildCredentialSnapshot, type CredentialEntry } from '../workflows/resolve-credentials';
import { createIdentityEnforcedSubmitWorkflowTool } from '../workflows/submit-workflow-identity';
import {
type SubmitWorkflowAttempt,
type SubmitWorkflowOutput,
} from '../workflows/submit-workflow.tool';
import { isMockableTriggerNodeType } from '../workflows/workflow-json-utils';
import { createWorkflowsTool, type WorkflowAction } from '../workflows.tool';
interface BuilderMemoryBinding {
resource: string;
@ -72,6 +73,53 @@ function createBuilderResourceId(userId: string): string {
return `${userId}:workflow-builder`;
}
const BUILDER_WORKFLOW_ACTIONS = [
'list',
'get',
'get-as-code',
] as const satisfies readonly WorkflowAction[];
const BUILDER_CREDENTIAL_ACTIONS = [
'list',
'get',
'search-types',
'test',
] as const satisfies readonly CredentialAction[];
// The builder owns its tool/action surface here. The generic tool factories only enforce
// the action list they are given, which keeps agent policy out of shared tools.
const BUILDER_SANDBOX_TOOL_NAMES = [
'nodes',
'workflows',
'credentials',
'executions',
'data-tables',
'ask-user',
] as const;
const BUILDER_TOOL_MODE_TOOL_NAMES = [
'build-workflow',
'nodes',
'workflows',
'data-tables',
'ask-user',
] as const;
function createBuilderWorkflowsTool(context: InstanceAiContext) {
return createWorkflowsTool(context, {
allowedActions: BUILDER_WORKFLOW_ACTIONS,
descriptionPrefix: 'Inspect workflows during build',
});
}
function createBuilderCredentialsTool(context: InstanceAiContext) {
return createCredentialsTool(context, {
allowedActions: BUILDER_CREDENTIAL_ACTIONS,
descriptionPrefix: 'Inspect credentials during build',
descriptionSuffix: 'Setup is handled after workflow verification.',
});
}
export function buildWarmBuilderFollowUp(input: {
task: string;
conversationContext?: string;
@ -181,15 +229,159 @@ function detectTriggerType(_attempt: SubmitWorkflowAttempt | undefined): Trigger
return 'manual_or_testable';
}
export type OutcomeForVerificationReadiness = Pick<
WorkflowBuildOutcome,
| 'submitted'
| 'workflowId'
| 'triggerNodes'
| 'mockedCredentialTypes'
| 'mockedCredentialsByNode'
| 'verificationPinData'
| 'usesWorkflowPinDataForVerification'
| 'hasUnresolvedPlaceholders'
| 'verification'
| 'remediation'
>;
function hasMockedCredentials(outcome: OutcomeForVerificationReadiness): boolean {
return (
(outcome.mockedCredentialTypes?.length ?? 0) > 0 ||
Object.keys(outcome.mockedCredentialsByNode ?? {}).length > 0
);
}
function hasCredentialVerificationData(outcome: OutcomeForVerificationReadiness): boolean {
return (
Object.keys(outcome.verificationPinData ?? {}).length > 0 ||
outcome.usesWorkflowPinDataForVerification === true
);
}
function hasSuccessfulStructuredVerification(outcome: OutcomeForVerificationReadiness): boolean {
return (
outcome.verification?.attempted === true &&
outcome.verification.success &&
!!outcome.verification.executionId
);
}
export function determineVerificationReadiness(
outcome: OutcomeForVerificationReadiness,
): WorkflowVerificationReadiness {
if (hasSuccessfulStructuredVerification(outcome)) {
return { status: 'already_verified' };
}
if (!outcome.submitted) {
return {
status: 'not_verifiable',
reason: 'not-submitted',
guidance: 'The build did not submit a workflow, so there is nothing to verify.',
};
}
if (!outcome.workflowId) {
return {
status: 'not_verifiable',
reason: 'missing-workflow-id',
guidance: 'The build outcome does not include a workflow ID.',
};
}
if (outcome.hasUnresolvedPlaceholders) {
return {
status: 'needs_setup',
reason: 'unresolved-placeholders',
guidance: 'Route the workflow through setup before verification.',
};
}
if (hasMockedCredentials(outcome) && !hasCredentialVerificationData(outcome)) {
return {
status: 'needs_setup',
reason: 'missing-mocked-credential-pin-data',
guidance: 'Route the workflow through setup because mocked credentials cannot be verified.',
};
}
if (outcome.remediation?.category === 'needs_setup') {
return {
status: 'needs_setup',
reason: 'workflow-needs-setup',
guidance: outcome.remediation.guidance,
};
}
if (!outcome.triggerNodes?.some((node) => isMockableTriggerNodeType(node.nodeType))) {
return {
status: 'not_verifiable',
reason: 'non-mockable-trigger',
guidance: 'The workflow does not have a trigger the post-build verifier can exercise.',
};
}
return { status: 'ready' };
}
export function determineSetupRequirement(
outcome: OutcomeForVerificationReadiness,
): WorkflowSetupRequirement {
if (!outcome.submitted || !outcome.workflowId) {
return { status: 'not_required' };
}
if (outcome.hasUnresolvedPlaceholders) {
return {
status: 'required',
reason: 'unresolved-placeholders',
guidance: 'Route the workflow through setup so the user can fill unresolved values.',
};
}
if (hasMockedCredentials(outcome)) {
return {
status: 'required',
reason: 'mocked-credentials',
guidance: 'Route the workflow through setup so the user can add real credentials.',
};
}
if (outcome.remediation?.category === 'needs_setup') {
return {
status: 'required',
reason: 'workflow-needs-setup',
guidance: outcome.remediation.guidance,
};
}
return { status: 'not_required' };
}
type OutcomeWithoutDeterministicRouting = Omit<
WorkflowBuildOutcome,
'verificationReadiness' | 'setupRequirement'
>;
function withDeterministicRouting(
outcome: OutcomeWithoutDeterministicRouting,
): WorkflowBuildOutcome {
return {
...outcome,
verificationReadiness: determineVerificationReadiness(outcome),
setupRequirement: determineSetupRequirement(outcome),
};
}
function buildOutcome(
workItemId: string,
runId: string,
taskId: string,
attempt: SubmitWorkflowAttempt | undefined,
finalText: string,
supportingWorkflowIds: string[] = [],
): WorkflowBuildOutcome {
if (!attempt?.success) {
return {
return withDeterministicRouting({
workItemId,
runId,
taskId,
@ -199,7 +391,7 @@ function buildOutcome(
failureSignature: attempt?.errors?.join('; '),
remediation: attempt?.remediation,
summary: finalText,
};
});
}
const placeholderRemediation = attempt.hasUnresolvedPlaceholders
? createRemediation({
@ -210,7 +402,7 @@ function buildOutcome(
'Workflow submitted successfully, but unresolved setup values remain. Stop code edits and route to workflows(action="setup").',
})
: undefined;
return {
return withDeterministicRouting({
workItemId,
runId,
taskId,
@ -224,10 +416,12 @@ function buildOutcome(
mockedCredentialsByNode: attempt.mockedCredentialsByNode,
triggerNodes: attempt.triggerNodes,
verificationPinData: attempt.verificationPinData,
usesWorkflowPinDataForVerification: attempt.usesWorkflowPinDataForVerification,
supportingWorkflowIds: supportingWorkflowIds.length > 0 ? supportingWorkflowIds : undefined,
hasUnresolvedPlaceholders: attempt.hasUnresolvedPlaceholders,
remediation: placeholderRemediation ?? attempt.remediation,
summary: finalText,
};
});
}
export function mergeLatestVerificationIntoOutcome(
@ -245,10 +439,10 @@ export function mergeLatestVerificationIntoOutcome(
return outcome;
}
return {
return withDeterministicRouting({
...outcome,
verification: latestOutcome.verification,
};
});
}
export function withTerminalLoopState(
@ -260,13 +454,13 @@ export function withTerminalLoopState(
return outcome;
}
return {
return withDeterministicRouting({
...outcome,
workflowId: outcome.workflowId ?? state.workflowId,
needsUserInput: remediation.category === 'needs_setup',
blockingReason: remediation.guidance,
remediation,
};
});
}
async function finalBuildOutcome(
@ -299,8 +493,16 @@ async function buildOutcomeWithLatestVerification(
taskId: string,
attempt: SubmitWorkflowAttempt | undefined,
finalText: string,
supportingWorkflowIds: string[] = [],
): Promise<WorkflowBuildOutcome> {
const outcome = buildOutcome(workItemId, context.runId, taskId, attempt, finalText);
const outcome = buildOutcome(
workItemId,
context.runId,
taskId,
attempt,
finalText,
supportingWorkflowIds,
);
return await finalBuildOutcome(context, workItemId, outcome);
}
@ -341,14 +543,10 @@ The system tracks file hashes. If you edit the code and then call \`executions(a
### Resource discovery
Before writing code that uses external services, **resolve real resource IDs**:
- Call \`nodes(action="explore-resources")\` for any parameter with searchListMethod (calendars, spreadsheets, channels, models, etc.)
- Call \`nodes(action="explore-resources")\` for any parameter with searchListMethod when a matching explicit credential is attached (calendars, spreadsheets, channels, models, etc.)
- Do NOT use "primary", "default", or any assumed identifier look up the actual value
- Call \`nodes(action="suggested")\` early if the workflow fits a known category (web_app, form_input, data_persistence, etc.) — the pattern hints prevent common mistakes
- Check @builderHint annotations in node type definitions for critical configuration guidance
### Publishing
Do NOT call \`workflows(action="publish")\` for the main workflow. Publishing is the user's decision after testing. Your job ends at a successful submit. The only exception is sub-workflows in the compositional pattern — those must be published so the parent workflow can reference them.
`;
function hashContent(content: string | null): string {
@ -415,6 +613,30 @@ function latestMainSubmit(
return undefined;
}
export function supportingWorkflowIdsFromSubmitAttempts(
submitAttempts: SubmitWorkflowAttempt[],
mainWorkflowPath: string,
mainWorkflowId: string | undefined,
referencedWorkflowIds: string[] = [],
): string[] {
const seen = new Set<string>();
const referencedWorkflowIdSet = new Set(referencedWorkflowIds);
const supportingWorkflowIds: string[] = [];
for (const attempt of submitAttempts) {
if (!attempt.success || !attempt.workflowId) continue;
if (attempt.filePath === mainWorkflowPath) continue;
if (attempt.workflowId === mainWorkflowId) continue;
if (!referencedWorkflowIdSet.has(attempt.workflowId)) continue;
if (seen.has(attempt.workflowId)) continue;
seen.add(attempt.workflowId);
supportingWorkflowIds.push(attempt.workflowId);
}
return supportingWorkflowIds;
}
/**
* When the builder's stream errors mid-run, recover a successful-submit outcome
* from the submit-attempt history so the orchestrator doesn't redo a build that
@ -444,7 +666,19 @@ export function resultFromPostStreamError(input: {
const text = `Workflow ${attempt.workflowId} submitted successfully. A later step failed: ${errorText}`;
return {
text,
outcome: buildOutcome(input.workItemId, input.runId, input.taskId, attempt, text),
outcome: buildOutcome(
input.workItemId,
input.runId,
input.taskId,
attempt,
text,
supportingWorkflowIdsFromSubmitAttempts(
input.submitAttempts,
input.mainWorkflowPath,
attempt.workflowId,
attempt.referencedWorkflowIds,
),
),
};
}
@ -539,7 +773,19 @@ export function resultFromLaterFailedMainSubmit(input: {
`A later submit failed: ${errorText}`;
return {
text,
outcome: buildOutcome(input.workItemId, input.runId, input.taskId, preservedAttempt, text),
outcome: buildOutcome(
input.workItemId,
input.runId,
input.taskId,
preservedAttempt,
text,
supportingWorkflowIdsFromSubmitAttempts(
input.submitAttempts,
input.mainWorkflowPath,
preservedAttempt.workflowId,
preservedAttempt.referencedWorkflowIds,
),
),
};
}
@ -615,48 +861,40 @@ export async function startBuildWorkflowAgentTask(
let builderTools: ToolsInput;
let prompt = BUILDER_AGENT_PROMPT;
let credMap: CredentialMap | undefined;
let availableCredentials: CredentialEntry[] | undefined;
if (useSandbox) {
const credentialSnapshot = await buildCredentialSnapshot(domainContext.credentialService);
credMap = credentialSnapshot.map;
availableCredentials = credentialSnapshot.list;
const toolNames = [
'nodes',
'workflows',
'credentials',
'executions',
'data-tables',
'ask-user',
];
const builderWorkflowsTool = createBuilderWorkflowsTool(domainContext);
const builderCredentialsTool = createBuilderCredentialsTool(domainContext);
builderTools = {};
for (const name of toolNames) {
for (const name of BUILDER_SANDBOX_TOOL_NAMES) {
if (context.domainTools[name]) {
builderTools[name] = context.domainTools[name];
}
}
builderTools.workflows = builderWorkflowsTool;
builderTools.credentials = builderCredentialsTool;
if (context.workflowTaskService && context.domainContext) {
builderTools['verify-built-workflow'] = createVerifyBuiltWorkflowTool(context);
}
} else {
builderTools = {};
const toolNames = [
'build-workflow',
'nodes',
'workflows',
'data-tables',
'ask-user',
...(context.researchMode ? ['research'] : []),
];
const toolNames = context.researchMode
? [...BUILDER_TOOL_MODE_TOOL_NAMES, 'research']
: BUILDER_TOOL_MODE_TOOL_NAMES;
for (const name of toolNames) {
if (name in context.domainTools) {
builderTools[name] = context.domainTools[name];
}
}
if (domainContext) {
builderTools.workflows = createBuilderWorkflowsTool(domainContext);
builderTools.credentials = createBuilderCredentialsTool(domainContext);
}
if (!builderTools['build-workflow']) {
return { result: 'Error: build-workflow tool not available.', taskId: '', agentId: '' };
@ -806,15 +1044,7 @@ export async function startBuildWorkflowAgentTask(
if (!reusedBuilderSession && workflowId && domainContext) {
try {
const json = await domainContext.workflowService.getAsWorkflowJSON(workflowId);
let rawCode = generateWorkflowCode(json);
// Preserve the original id so credentials stay bound across saves.
// Stripping the id forced resolution through resolveCredentials,
// which does last-write-wins by credential type when a user has
// multiple credentials of the same type.
rawCode = rawCode.replace(
/newCredential\('([^']*)',\s*'([^']*)'\)/g,
"{ id: '$2', name: '$1' }",
);
const rawCode = generateWorkflowCode(json);
const code = `${SDK_IMPORT_STATEMENT}\n\n${rawCode}`;
if (workspace.filesystem) {
await workspace.filesystem.writeFile(`${root}/src/workflow.ts`, code, {
@ -830,7 +1060,6 @@ export async function startBuildWorkflowAgentTask(
builderTools['submit-workflow'] = createIdentityEnforcedSubmitWorkflowTool({
context: domainContext,
workspace,
credentialMap: credMap,
availableCredentials,
root,
currentRunId: context.runId,
@ -872,6 +1101,12 @@ export async function startBuildWorkflowAgentTask(
attempt.success
? 'Workflow submitted and ready for verification.'
: (attempt.errors?.join(' ') ?? 'Workflow submission failed.'),
supportingWorkflowIdsFromSubmitAttempts(
submitAttemptHistory,
mainWorkflowPath,
attempt.workflowId,
attempt.referencedWorkflowIds,
),
),
);
},
@ -1074,6 +1309,12 @@ export async function startBuildWorkflowAgentTask(
taskId,
refreshedAttempt,
finalText,
supportingWorkflowIdsFromSubmitAttempts(
submitAttemptHistory,
mainWorkflowPath,
refreshedAttempt.workflowId,
refreshedAttempt.referencedWorkflowIds,
),
);
return {
text: finalText,
@ -1144,6 +1385,12 @@ export async function startBuildWorkflowAgentTask(
taskId,
mainWorkflowAttempt,
finalText,
supportingWorkflowIdsFromSubmitAttempts(
submitAttemptHistory,
mainWorkflowPath,
mainWorkflowAttempt.workflowId,
mainWorkflowAttempt.referencedWorkflowIds,
),
);
return {
text: finalText,

View File

@ -21,6 +21,7 @@ import {
applyNodeChanges,
buildCompletedReport,
} from './workflows/setup-workflow.service';
import { getReferencedWorkflowIds } from './workflows/workflow-json-utils';
// ── Action schemas ──────────────────────────────────────────────────────────
@ -147,38 +148,125 @@ type Input =
| z.infer<typeof updateVersionAction>;
type PublishInput = z.infer<typeof publishExtendedAction>;
type PublishRollbackResult = {
rolledBackWorkflowIds: string[];
rollbackErrors: Array<{ workflowId: string; error: string }>;
};
function buildInputSchema(context: InstanceAiContext, surface: 'full' | 'orchestrator') {
export type WorkflowAction =
| 'list'
| 'get'
| 'get-as-code'
| 'delete'
| 'unarchive'
| 'setup'
| 'publish'
| 'unpublish'
| 'list-versions'
| 'get-version'
| 'restore-version'
| 'update-version';
type WorkflowActionSchema = z.ZodDiscriminatedUnionOption<'action'>;
export interface WorkflowsToolOptions {
allowedActions?: readonly WorkflowAction[];
descriptionPrefix?: string;
descriptionSuffix?: string;
}
/**
 * Canonical ordering of workflow tool actions — governs the order of schema
 * variants and of action names in the generated tool description. The
 * `satisfies` clause verifies every entry is a valid WorkflowAction.
 */
const WORKFLOW_ACTION_ORDER = [
  'list',
  'get',
  'get-as-code',
  'delete',
  'unarchive',
  'setup',
  'publish',
  'unpublish',
  'list-versions',
  'get-version',
  'restore-version',
  'update-version',
] as const satisfies readonly WorkflowAction[];
/**
 * Human-readable verb phrase per action, interpolated into the generated tool
 * description. `satisfies Record<WorkflowAction, string>` requires a label
 * for every action, so adding a WorkflowAction without a label fails to compile.
 */
const WORKFLOW_ACTION_LABELS = {
  list: 'list',
  get: 'inspect',
  'get-as-code': 'convert existing workflows to TypeScript SDK code',
  delete: 'archive',
  unarchive: 'restore archived workflows',
  setup: 'set up credentials and parameters',
  publish: 'publish',
  unpublish: 'unpublish',
  'list-versions': 'list versions',
  'get-version': 'inspect versions',
  'restore-version': 'restore versions',
  'update-version': 'update version metadata',
} satisfies Record<WorkflowAction, string>;
function getSupportedWorkflowActionSchemas(
context: InstanceAiContext,
): Partial<Record<WorkflowAction, WorkflowActionSchema>> {
const hasNamedVersions = !!context.workflowService.updateVersion;
const hasVersions = !!context.workflowService.listVersions;
const actions: Array<z.ZodObject<z.ZodRawShape>> = [
listAction,
getAction,
deleteAction,
unarchiveAction,
setupAction,
hasNamedVersions ? publishExtendedAction : publishBaseAction,
unpublishAction,
];
return {
list: listAction,
get: getAction,
'get-as-code': getAsCodeAction,
delete: deleteAction,
unarchive: unarchiveAction,
setup: setupAction,
publish: hasNamedVersions ? publishExtendedAction : publishBaseAction,
unpublish: unpublishAction,
...(hasVersions
? {
'list-versions': listVersionsAction,
'get-version': getVersionAction,
'restore-version': restoreVersionAction,
}
: {}),
...(hasNamedVersions ? { 'update-version': updateVersionAction } : {}),
};
}
// get-as-code excluded from orchestrator surface
if (surface !== 'orchestrator') {
actions.push(getAsCodeAction);
/**
 * Compute the actions to expose, in canonical order: an action must both have
 * a schema supported by the current context and pass the caller's allowlist.
 * With no `allowedActions` option, every supported action is exposed.
 */
function getWorkflowActions(
  supportedSchemas: Partial<Record<WorkflowAction, WorkflowActionSchema>>,
  options: WorkflowsToolOptions,
): WorkflowAction[] {
  const allowlist: ReadonlySet<WorkflowAction> = new Set(
    options.allowedActions ?? WORKFLOW_ACTION_ORDER,
  );
  const exposed: WorkflowAction[] = [];
  for (const action of WORKFLOW_ACTION_ORDER) {
    if (allowlist.has(action) && supportedSchemas[action] !== undefined) {
      exposed.push(action);
    }
  }
  return exposed;
}
function buildInputSchema(context: InstanceAiContext, options: WorkflowsToolOptions) {
const supportedSchemas = getSupportedWorkflowActionSchemas(context);
const actionSchemas: WorkflowActionSchema[] = [];
for (const action of getWorkflowActions(supportedSchemas, options)) {
const schema = supportedSchemas[action];
if (schema) actionSchemas.push(schema);
}
// Version-related actions only when the context supports them
if (hasVersions) {
actions.push(listVersionsAction);
actions.push(getVersionAction);
actions.push(restoreVersionAction);
}
if (hasNamedVersions) {
actions.push(updateVersionAction);
if (actionSchemas.length === 0) {
throw new Error('Workflows tool requires at least one allowed action');
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
return sanitizeInputSchema(z.discriminatedUnion('action', actions as any));
if (actionSchemas.length === 1) {
return sanitizeInputSchema(actionSchemas[0]);
}
return sanitizeInputSchema(
z.discriminatedUnion(
'action',
actionSchemas as unknown as [
WorkflowActionSchema,
WorkflowActionSchema,
...WorkflowActionSchema[],
],
),
);
}
// ── Handlers ────────────────────────────────────────────────────────────────
@ -517,16 +605,21 @@ async function handlePublish(
return { success: false, denied: true, reason: 'Action blocked by admin' };
}
const supportingWorkflowIds = await resolveSupportingWorkflowIds(context, input.workflowId);
const needsApproval = context.permissions?.publishWorkflow !== 'always_allow';
if (needsApproval && (resumeData === undefined || resumeData === null)) {
const workflowName = await resolveWorkflowName(context, input.workflowId);
const dependencyNote =
supportingWorkflowIds.length > 0
? ` and ${String(supportingWorkflowIds.length)} referenced supporting workflow(s)`
: '';
const suspension = await suspend?.({
requestId: nanoid(),
message: input.versionId
? `Publish version "${input.versionId}" of workflow "${workflowName}" (ID: ${input.workflowId})?`
: `Publish workflow "${workflowName}" (ID: ${input.workflowId})?`,
? `Publish version "${input.versionId}" of workflow "${workflowName}" (ID: ${input.workflowId})${dependencyNote}?`
: `Publish workflow "${workflowName}" (ID: ${input.workflowId})${dependencyNote}?`,
severity: 'warning' as const,
});
return suspension ?? { success: false, denied: true, reason: 'Awaiting confirmation' };
@ -537,16 +630,47 @@ async function handlePublish(
}
try {
const result = await context.workflowService.publish(input.workflowId, {
versionId: input.versionId,
...(hasNamedVersions
? {
name: input.name,
description: input.description,
}
: {}),
});
return { success: true, activeVersionId: result.activeVersionId };
const previousActiveVersionIds = await snapshotActiveVersionIds(context, [
...supportingWorkflowIds,
input.workflowId,
]);
const publishedSupportingWorkflowIds: string[] = [];
const publishedWorkflowIds: string[] = [];
try {
for (const supportingWorkflowId of supportingWorkflowIds) {
await context.workflowService.publish(supportingWorkflowId);
publishedSupportingWorkflowIds.push(supportingWorkflowId);
publishedWorkflowIds.push(supportingWorkflowId);
}
const result = await context.workflowService.publish(input.workflowId, {
versionId: input.versionId,
...(hasNamedVersions
? {
name: input.name,
description: input.description,
}
: {}),
});
publishedWorkflowIds.push(input.workflowId);
return {
success: true,
activeVersionId: result.activeVersionId,
publishedWorkflowIds,
...(publishedSupportingWorkflowIds.length > 0
? { supportingWorkflowIds: publishedSupportingWorkflowIds }
: {}),
};
} catch (error) {
const rollback = await rollbackPublishedWorkflows(
context,
previousActiveVersionIds,
publishedWorkflowIds,
);
return buildPublishFailure(error, rollback);
}
} catch (error) {
return {
success: false,
@ -555,6 +679,75 @@ async function handlePublish(
}
}
/**
 * Record each workflow's current activeVersionId so a failed publish can be
 * rolled back to the exact prior state.
 *
 * @param context - Instance context providing the workflow service.
 * @param workflowIds - Workflows whose active version should be captured.
 * @returns Map of workflowId → activeVersionId (null when unpublished).
 */
async function snapshotActiveVersionIds(
  context: InstanceAiContext,
  workflowIds: string[],
): Promise<Map<string, string | null>> {
  const snapshot = new Map<string, string | null>();
  // Sequential lookups, in the given order, mirroring the publish sequence.
  for (const id of workflowIds) {
    const { activeVersionId } = await context.workflowService.get(id);
    snapshot.set(id, activeVersionId);
  }
  return snapshot;
}
/**
 * Best-effort rollback of workflows published before a multi-workflow publish
 * failed. Workflows are reverted in reverse publish order: republish the
 * previously active version when one was recorded, otherwise unpublish.
 *
 * Individual rollback failures are collected (not thrown) so the caller can
 * report a complete picture of what was and wasn't restored.
 */
async function rollbackPublishedWorkflows(
  context: InstanceAiContext,
  previousActiveVersionIds: Map<string, string | null>,
  publishedWorkflowIds: string[],
): Promise<PublishRollbackResult> {
  const rolledBackWorkflowIds: string[] = [];
  const rollbackErrors: Array<{ workflowId: string; error: string }> = [];
  // Undo most-recent-first without mutating the caller's array.
  for (const workflowId of [...publishedWorkflowIds].reverse()) {
    const restoreVersionId = previousActiveVersionIds.get(workflowId);
    try {
      if (restoreVersionId) {
        await context.workflowService.publish(workflowId, { versionId: restoreVersionId });
      } else {
        // No prior active version recorded — the workflow was unpublished before.
        await context.workflowService.unpublish(workflowId);
      }
      rolledBackWorkflowIds.push(workflowId);
    } catch (error) {
      rollbackErrors.push({
        workflowId,
        error: error instanceof Error ? error.message : 'Rollback failed',
      });
    }
  }
  return { rolledBackWorkflowIds, rollbackErrors };
}
/**
 * Shape a publish failure for the caller: the error message plus, when any
 * rollback activity occurred, which workflows were restored and which
 * rollbacks themselves failed. Empty rollback arrays are omitted entirely.
 */
function buildPublishFailure(error: unknown, rollback: PublishRollbackResult) {
  const message = error instanceof Error ? error.message : 'Publish failed';
  const { rolledBackWorkflowIds, rollbackErrors } = rollback;
  return {
    success: false,
    error: message,
    ...(rolledBackWorkflowIds.length ? { rolledBackWorkflowIds } : {}),
    ...(rollbackErrors.length ? { rollbackErrors } : {}),
  };
}
/**
 * Collect the IDs of workflows referenced (via Execute Workflow nodes) by the
 * given workflow, excluding self-references. Returns an empty list when the
 * workflow JSON cannot be loaded, so publishing proceeds without supporting
 * workflows rather than failing outright.
 */
async function resolveSupportingWorkflowIds(
  context: InstanceAiContext,
  workflowId: string,
): Promise<string[]> {
  try {
    const json = await context.workflowService.getAsWorkflowJSON(workflowId);
    const referenced = getReferencedWorkflowIds(json);
    return referenced.filter((id) => id !== workflowId);
  } catch {
    // Non-fatal: treat an unreadable workflow as having no references.
    return [];
  }
}
async function handleUnpublish(
context: InstanceAiContext,
input: Extract<Input, { action: 'unpublish' }>,
@ -714,11 +907,32 @@ function formatFieldValue(value: string | null): string {
return `"${value}"`;
}
/**
 * Render action labels as a human-readable English list: "a", "a and b", or
 * "a, b, and c" (Oxford comma for three or more).
 */
function formatWorkflowActionList(actions: readonly WorkflowAction[]): string {
  const labels = actions.map((action) => WORKFLOW_ACTION_LABELS[action]);
  if (labels.length > 2) {
    const head = labels.slice(0, -1).join(', ');
    const tail = labels[labels.length - 1];
    return `${head}, and ${tail}`;
  }
  return labels.join(' and ');
}
/**
 * Build the tool description from the actions actually exposed on this
 * surface, e.g. "Manage workflows — list, inspect, and publish."
 *
 * A custom descriptionPrefix is concatenated verbatim before the action list
 * (so it should carry its own trailing separator) and suppresses the default
 * activeVersionId suffix unless descriptionSuffix is provided explicitly.
 */
function getToolDescription(context: InstanceAiContext, options: WorkflowsToolOptions): string {
  const supportedSchemas = getSupportedWorkflowActionSchemas(context);
  const actionList = formatWorkflowActionList(getWorkflowActions(supportedSchemas, options));
  // The default prefix must end with a separator — without it the description
  // renders as "Manage workflowslist, inspect, …" instead of the intended
  // "Manage workflows — list, inspect, …" form.
  const description = `${options.descriptionPrefix ?? 'Manage workflows — '}${actionList}.`;
  const suffix =
    options.descriptionSuffix ??
    (options.descriptionPrefix
      ? undefined
      : 'Workflow results use activeVersionId: null for unpublished workflows.');
  return suffix ? `${description} ${suffix}` : description;
}
// ── Tool factory ────────────────────────────────────────────────────────────
export function createWorkflowsTool(
context: InstanceAiContext,
surface: 'full' | 'orchestrator' = 'full',
options: WorkflowsToolOptions = {},
) {
// Closure state for the setup action's suspend/resume cycle
const setupState: { currentRequestId: string | null; preTestSnapshot: WorkflowJSON | null } = {
@ -726,12 +940,11 @@ export function createWorkflowsTool(
preTestSnapshot: null,
};
const inputSchema = buildInputSchema(context, surface);
const inputSchema = buildInputSchema(context, options);
return createTool({
id: 'workflows',
description:
'Manage workflows — list, inspect, archive, restore, set up, publish, unpublish, and manage versions. Workflow results use activeVersionId: null for unpublished workflows.',
description: getToolDescription(context, options),
inputSchema,
suspendSchema,
resumeSchema,

View File

@ -1,11 +1,7 @@
import type { WorkflowJSON } from '@n8n/workflow-sdk';
import type { InstanceAiContext } from '../../../types';
import {
resolveCredentials,
type CredentialEntry,
type CredentialMap,
} from '../resolve-credentials';
import { resolveCredentials, type CredentialEntry } from '../resolve-credentials';
// ---------------------------------------------------------------------------
// Helpers
@ -45,8 +41,8 @@ function makeWorkflow(overrides: Partial<WorkflowJSON> = {}): WorkflowJSON {
// ---------------------------------------------------------------------------
describe('resolveCredentials', () => {
describe('credential map resolution', () => {
it('resolves credentials from the credential map', async () => {
describe('missing credential mocking', () => {
it('mocks missing credentials', async () => {
const json = makeWorkflow({
nodes: [
{
@ -60,16 +56,37 @@ describe('resolveCredentials', () => {
],
});
const credMap: CredentialMap = new Map([['slackApi', { id: 'cred-1', name: 'My Slack' }]]);
const result = await resolveCredentials(json, undefined, createMockContext());
const result = await resolveCredentials(json, undefined, createMockContext(), credMap);
expect(result.mockedNodeNames).toEqual([]);
expect(result.mockedCredentialTypes).toEqual([]);
expect(json.nodes[0].credentials).toEqual({
slackApi: { id: 'cred-1', name: 'My Slack' },
expect(result.mockedNodeNames).toEqual(['Slack']);
expect(result.mockedCredentialTypes).toEqual(['slackApi']);
expect(result.mockedCredentialsByNode).toEqual({ Slack: ['slackApi'] });
expect(json.nodes[0].credentials).toEqual({});
expect(result.verificationPinData).toEqual({
Slack: [{ _mockedCredential: 'slackApi' }],
});
});
it('mocks null credentials', async () => {
const json = makeWorkflow({
nodes: [
{
id: '1',
name: 'Slack',
type: 'n8n-nodes-base.slack',
typeVersion: 2,
position: [0, 0],
credentials: { slackApi: null as unknown as { id: string; name: string } },
},
],
});
const result = await resolveCredentials(json, undefined, createMockContext());
expect(result.mockedNodeNames).toEqual(['Slack']);
expect(result.mockedCredentialTypes).toEqual(['slackApi']);
expect(json.nodes[0].credentials).toEqual({});
});
});
describe('existing workflow restoration', () => {
@ -101,7 +118,7 @@ describe('resolveCredentials', () => {
});
const ctx = createMockContext(existingWorkflow);
const result = await resolveCredentials(json, 'wf-123', ctx, new Map());
const result = await resolveCredentials(json, 'wf-123', ctx);
expect(result.mockedNodeNames).toEqual([]);
expect(json.nodes[0].credentials).toEqual({
@ -111,7 +128,7 @@ describe('resolveCredentials', () => {
});
describe('credential mocking with sidecar verification data', () => {
it('mocks unresolved credentials and preserves existing pinData', async () => {
it('mocks unresolved credentials and marks existing pinData as verification-ready', async () => {
const json = makeWorkflow({
nodes: [
{
@ -128,7 +145,7 @@ describe('resolveCredentials', () => {
},
});
const result = await resolveCredentials(json, undefined, createMockContext(), new Map());
const result = await resolveCredentials(json, undefined, createMockContext());
expect(result.mockedNodeNames).toEqual(['Slack']);
expect(result.mockedCredentialTypes).toEqual(['slackApi']);
@ -139,8 +156,9 @@ describe('resolveCredentials', () => {
expect(json.pinData).toEqual({
Slack: [{ ok: true, channel: 'C123', message: { text: 'Hello' } }],
});
// No verification pin data needed — existing pinData suffices
// No sidecar pin data needed — existing workflow pinData suffices
expect(result.verificationPinData).toEqual({});
expect(result.usesWorkflowPinDataForVerification).toBe(true);
});
it('produces sidecar verification pinData when no existing pinData', async () => {
@ -157,7 +175,7 @@ describe('resolveCredentials', () => {
],
});
const result = await resolveCredentials(json, undefined, createMockContext(), new Map());
const result = await resolveCredentials(json, undefined, createMockContext());
expect(result.mockedNodeNames).toEqual(['Gmail']);
expect(result.mockedCredentialTypes).toEqual(['gmailOAuth2Api']);
@ -185,7 +203,7 @@ describe('resolveCredentials', () => {
],
});
const result = await resolveCredentials(json, undefined, createMockContext(), new Map());
const result = await resolveCredentials(json, undefined, createMockContext());
expect(result.mockedNodeNames).toEqual([]);
expect(result.mockedCredentialTypes).toEqual([]);
@ -218,7 +236,7 @@ describe('resolveCredentials', () => {
],
});
const result = await resolveCredentials(json, undefined, createMockContext(), new Map());
const result = await resolveCredentials(json, undefined, createMockContext());
expect(result.mockedNodeNames).toEqual(['Slack 1', 'Slack 2']);
expect(result.mockedCredentialTypes).toEqual(['slackApi']);
@ -257,14 +275,10 @@ describe('resolveCredentials', () => {
],
});
const credMap: CredentialMap = new Map([
['slackApi', { id: 'slack-2', name: 'Backup Slack' }],
]);
const result = await resolveCredentials(
json,
undefined,
createMockContext(),
credMap,
availableCredentials,
);
@ -274,7 +288,7 @@ describe('resolveCredentials', () => {
});
});
it('mocks a synthesized raw credential id instead of replacing it with the type-map fallback', async () => {
it('mocks a synthesized raw credential id', async () => {
const json = makeWorkflow({
nodes: [
{
@ -288,12 +302,10 @@ describe('resolveCredentials', () => {
],
});
const credMap: CredentialMap = new Map([['slackApi', { id: 'slack-1', name: 'Team Slack' }]]);
const result = await resolveCredentials(
json,
undefined,
createMockContext(),
credMap,
availableCredentials,
);
@ -324,7 +336,6 @@ describe('resolveCredentials', () => {
json,
undefined,
createMockContext(),
new Map(),
availableCredentials,
);
@ -351,7 +362,6 @@ describe('resolveCredentials', () => {
json,
undefined,
createMockContext(),
new Map(),
availableCredentials,
);
@ -387,13 +397,9 @@ describe('resolveCredentials', () => {
],
});
const result = await resolveCredentials(
json,
'wf-123',
createMockContext(existingWorkflow),
new Map(),
[{ id: 'existing-slack', name: 'Existing Slack', type: 'slackApi' }],
);
const result = await resolveCredentials(json, 'wf-123', createMockContext(existingWorkflow), [
{ id: 'existing-slack', name: 'Existing Slack', type: 'slackApi' },
]);
expect(result.mockedNodeNames).toEqual([]);
expect(json.nodes[0].credentials).toEqual({
@ -402,8 +408,8 @@ describe('resolveCredentials', () => {
});
});
describe('existing workflow takes priority over credential map', () => {
it('preserves the existing credential on an edit even when the map has a different credential of the same type', async () => {
describe('existing workflow restoration priority', () => {
it('preserves the existing credential on an edit', async () => {
const json = makeWorkflow({
nodes: [
{
@ -430,12 +436,8 @@ describe('resolveCredentials', () => {
],
});
const credMap: CredentialMap = new Map([
['openAiApi', { id: 'some-other-id', name: 'Other OpenAI' }],
]);
const ctx = createMockContext(existingWorkflow);
const result = await resolveCredentials(json, 'wf-123', ctx, credMap);
const result = await resolveCredentials(json, 'wf-123', ctx);
expect(result.mockedNodeNames).toEqual([]);
expect(json.nodes[0].credentials).toEqual({
@ -444,8 +446,8 @@ describe('resolveCredentials', () => {
});
});
describe('credential map takes priority over mocking', () => {
it('uses credential map even when pinData exists', async () => {
describe('mocking with existing pinData', () => {
it('mocks missing credentials and preserves user pinData', async () => {
const json = makeWorkflow({
nodes: [
{
@ -462,20 +464,21 @@ describe('resolveCredentials', () => {
},
});
const credMap: CredentialMap = new Map([['slackApi', { id: 'real-id', name: 'Real Slack' }]]);
const result = await resolveCredentials(json, undefined, createMockContext());
const result = await resolveCredentials(json, undefined, createMockContext(), credMap);
// Should use credential map, not mock
expect(result.mockedNodeNames).toEqual([]);
expect(json.nodes[0].credentials).toEqual({
slackApi: { id: 'real-id', name: 'Real Slack' },
expect(result.mockedNodeNames).toEqual(['Slack']);
expect(result.mockedCredentialTypes).toEqual(['slackApi']);
expect(json.nodes[0].credentials).toEqual({});
expect(json.pinData).toEqual({
Slack: [{ ok: true }],
});
expect(result.verificationPinData).toEqual({});
expect(result.usesWorkflowPinDataForVerification).toBe(true);
});
});
describe('mock pinData cleanup', () => {
it('removes mock pinData when credential is resolved from credential map', async () => {
it('removes mock pinData when an explicit credential is valid for the type', async () => {
const json = makeWorkflow({
nodes: [
{
@ -484,7 +487,7 @@ describe('resolveCredentials', () => {
type: 'n8n-nodes-base.slack',
typeVersion: 2,
position: [0, 0],
credentials: { slackApi: undefined as unknown as { id: string; name: string } },
credentials: { slackApi: { id: 'real-id', name: 'Real Slack' } },
},
],
pinData: {
@ -492,14 +495,14 @@ describe('resolveCredentials', () => {
},
});
const credMap: CredentialMap = new Map([['slackApi', { id: 'real-id', name: 'Real Slack' }]]);
await resolveCredentials(json, undefined, createMockContext(), credMap);
await resolveCredentials(json, undefined, createMockContext(), [
{ id: 'real-id', name: 'Real Slack', type: 'slackApi' },
]);
// Mock pinData should be cleaned up since real credential was found
expect(json.pinData).toEqual({});
});
it('preserves user-defined pinData when credential is resolved', async () => {
it('preserves user-defined pinData when an explicit credential is valid for the type', async () => {
const json = makeWorkflow({
nodes: [
{
@ -508,7 +511,7 @@ describe('resolveCredentials', () => {
type: 'n8n-nodes-base.slack',
typeVersion: 2,
position: [0, 0],
credentials: { slackApi: undefined as unknown as { id: string; name: string } },
credentials: { slackApi: { id: 'real-id', name: 'Real Slack' } },
},
],
pinData: {
@ -516,10 +519,10 @@ describe('resolveCredentials', () => {
},
});
const credMap: CredentialMap = new Map([['slackApi', { id: 'real-id', name: 'Real Slack' }]]);
await resolveCredentials(json, undefined, createMockContext(), credMap);
await resolveCredentials(json, undefined, createMockContext(), [
{ id: 'real-id', name: 'Real Slack', type: 'slackApi' },
]);
// User-defined pinData (no _mockedCredential marker) should be preserved
expect(json.pinData).toEqual({
Slack: [{ ok: true, channel: 'C123' }],
});
@ -552,7 +555,7 @@ describe('resolveCredentials', () => {
},
});
const result = await resolveCredentials(json, undefined, createMockContext(), new Map());
const result = await resolveCredentials(json, undefined, createMockContext());
expect(result.mockedNodeNames).toEqual(['Gmail']);
expect(result.mockedCredentialTypes).toEqual(['gmailOAuth2Api']);

View File

@ -43,6 +43,7 @@ const mockedValidateWorkflow = jest.mocked(validateWorkflow);
// Minimal structural view of the created tool so tests can call `execute`
// directly without depending on the tool's full type. Only the result fields
// these tests assert on are declared.
type Executable = {
  execute: (input: Record<string, unknown>) => Promise<{
    success: boolean;
    usesWorkflowPinDataForVerification?: boolean;
    errors?: string[];
  }>;
};
@ -130,7 +131,6 @@ describe('createSubmitWorkflowTool — schema validation wiring', () => {
const tool = createSubmitWorkflowTool(
context,
makeBuildSuccessWorkspace(),
new Map(),
) as unknown as Executable;
await tool.execute({ filePath: 'src/workflow.ts', name: 'Test' });
@ -145,7 +145,6 @@ describe('createSubmitWorkflowTool — schema validation wiring', () => {
const tool = createSubmitWorkflowTool(
makeContext(),
makeBuildSuccessWorkspace(),
new Map(),
) as unknown as Executable;
await tool.execute({ filePath: 'src/workflow.ts', name: 'Test' });
@ -190,7 +189,6 @@ describe('createSubmitWorkflowTool — permission enforcement', () => {
const tool = createSubmitWorkflowTool(
makeContext({ createWorkflow: 'blocked' } as InstanceAiContext['permissions']),
makeWorkspace(),
new Map(),
(attempt) => {
attempts.push(attempt);
},
@ -211,7 +209,6 @@ describe('createSubmitWorkflowTool — permission enforcement', () => {
const tool = createSubmitWorkflowTool(
makeContext({ updateWorkflow: 'blocked' } as InstanceAiContext['permissions']),
makeWorkspace(),
new Map(),
(attempt) => {
attempts.push(attempt);
},
@ -229,6 +226,91 @@ describe('createSubmitWorkflowTool — permission enforcement', () => {
});
});
// Covers the credential-verification metadata that submit-workflow surfaces to
// the orchestrator: reuse of saved workflow pin data for mocked credentials,
// and Execute Workflow references discovered in the submitted JSON.
describe('createSubmitWorkflowTool — credential verification metadata', () => {
  it('surfaces workflow pin-data availability when mocked credentials reuse saved pin data', async () => {
    mockedValidateWorkflow.mockReturnValue({ errors: [], warnings: [] } as never);
    const attempts: SubmitWorkflowAttempt[] = [];
    const context = makeContext({} as InstanceAiContext['permissions'], {
      workflowService: {
        createFromWorkflowJSON: jest.fn().mockResolvedValue({ id: 'wf-1' }),
      } as unknown as InstanceAiContext['workflowService'],
    });
    // The Slack node has a null credential (forces mocking) while the workflow
    // carries its own pinData for that node.
    const tool = createSubmitWorkflowTool(
      context,
      makeBuildSuccessWorkspace({
        name: 'Test',
        nodes: [
          {
            id: '1',
            name: 'Slack',
            type: 'n8n-nodes-base.slack',
            typeVersion: 2,
            position: [0, 0],
            parameters: {},
            credentials: { slackApi: null },
          },
        ],
        connections: {},
        pinData: { Slack: [{ ok: true }] },
      }),
      (attempt) => {
        attempts.push(attempt);
      },
    ) as unknown as Executable;
    const out = await tool.execute({ filePath: 'src/workflow.ts', name: 'Test' });
    // The flag must appear both on the tool result and on the recorded attempt.
    expect(out.success).toBe(true);
    expect(out.usesWorkflowPinDataForVerification).toBe(true);
    expect(attempts[0]).toMatchObject({
      success: true,
      workflowId: 'wf-1',
      usesWorkflowPinDataForVerification: true,
    });
  });
  it('reports Execute Workflow references from the submitted workflow', async () => {
    mockedValidateWorkflow.mockReturnValue({ errors: [], warnings: [] } as never);
    const attempts: SubmitWorkflowAttempt[] = [];
    const context = makeContext({} as InstanceAiContext['permissions'], {
      workflowService: {
        createFromWorkflowJSON: jest.fn().mockResolvedValue({ id: 'wf-main' }),
      } as unknown as InstanceAiContext['workflowService'],
    });
    // Workflow containing an Execute Workflow node pointing at 'wf-chunk'.
    const tool = createSubmitWorkflowTool(
      context,
      makeBuildSuccessWorkspace({
        name: 'Main',
        nodes: [
          {
            id: '1',
            name: 'Run Chunk',
            type: 'n8n-nodes-base.executeWorkflow',
            typeVersion: 1.2,
            position: [0, 0],
            parameters: {
              source: 'database',
              workflowId: { __rl: true, mode: 'id', value: 'wf-chunk' },
            },
          },
        ],
        connections: {},
      }),
      (attempt) => {
        attempts.push(attempt);
      },
    ) as unknown as Executable;
    await tool.execute({ filePath: 'src/workflow.ts', name: 'Main' });
    // The referenced workflow id should be surfaced on the submit attempt.
    expect(attempts[0]).toMatchObject({
      success: true,
      workflowId: 'wf-main',
      referencedWorkflowIds: ['wf-chunk'],
    });
  });
});
describe('classifySubmitFailure', () => {
it('routes credential access save failures to setup instead of code remediation', () => {
const remediation = classifySubmitFailure(

View File

@ -164,13 +164,7 @@ export function createBuildWorkflowTool(context: InstanceAiContext) {
// Resolve undefined/null credentials before saving.
// newCredential() produces NewCredentialImpl which serializes to undefined.
const credentialSnapshot = await buildCredentialSnapshot(context.credentialService);
await resolveCredentials(
json,
workflowId,
context,
credentialSnapshot.map,
credentialSnapshot.list,
);
await resolveCredentials(json, workflowId, context, credentialSnapshot.list);
// Strip credential entries that are no longer valid for the current
// parameters. Resolution above (and the LLM itself) can re-emit stale

View File

@ -9,12 +9,6 @@ import type { WorkflowJSON } from '@n8n/workflow-sdk';
import type { InstanceAiContext } from '../../types';
/**
* Credential map passed from the orchestrator.
* Keyed by credential type (e.g., "openAiApi", "gmailOAuth2", "slackApi").
*/
export type CredentialMap = Map<string, { id: string; name: string }>;
/** Flat credential entry — preserves duplicates of the same type. */
export interface CredentialEntry {
id: string;
@ -24,11 +18,10 @@ export interface CredentialEntry {
/**
* Paired credential snapshot produced from a single `credentialService.list()`
* call: a type-keyed map for fallback resolution AND a flat list for
* validating raw credential ids without losing duplicates of the same type.
* call. The flat list validates raw credential ids without losing duplicates
* of the same type.
*/
export interface CredentialSnapshot {
map: CredentialMap;
list: CredentialEntry[];
}
@ -39,29 +32,16 @@ export interface CredentialSnapshot {
export async function buildCredentialSnapshot(
credentialService: Pick<InstanceAiContext['credentialService'], 'list'>,
): Promise<CredentialSnapshot> {
const map: CredentialMap = new Map();
const list: CredentialEntry[] = [];
try {
const allCreds = await credentialService.list();
for (const cred of allCreds) {
map.set(cred.type, { id: cred.id, name: cred.name });
list.push({ id: cred.id, name: cred.name, type: cred.type });
}
} catch {
// Non-fatal — credentials will be unresolved
}
return { map, list };
}
/**
* Build a credential map from all available credentials.
* Non-fatal returns an empty map if listing fails.
*/
export async function buildCredentialMap(
credentialService: Pick<InstanceAiContext['credentialService'], 'list'>,
): Promise<CredentialMap> {
const { map } = await buildCredentialSnapshot(credentialService);
return map;
return { list };
}
/** Result of credential resolution — includes mock metadata and sidecar verification data. */
@ -74,6 +54,8 @@ export interface CredentialResolutionResult {
mockedCredentialsByNode: Record<string, string[]>;
/** Pin data for verification only — NEVER written to workflow JSON. */
verificationPinData: Record<string, Array<Record<string, unknown>>>;
/** True when mocked credential nodes can be skipped by existing workflow-level pin data. */
usesWorkflowPinDataForVerification: boolean;
}
/**
@ -82,7 +64,7 @@ export interface CredentialResolutionResult {
* `newCredential()` produces `NewCredentialImpl` which serializes to `undefined`
* in `toJSON()`. Resolution strategy (in order):
* 1. Restore from the existing workflow (preserve the user's chosen credential on updates)
* 2. Match by credential type from the credential map (fallback for new nodes)
* 2. Preserve explicit valid raw credential ids
* 3. Mock: remove the credential key and produce sidecar verification pin data
*
* Mocked credentials produce verification-only pin data that is returned separately
@ -92,13 +74,13 @@ export async function resolveCredentials(
json: WorkflowJSON,
workflowId: string | undefined,
ctx: InstanceAiContext,
credentialMap: CredentialMap,
availableCredentials?: CredentialEntry[],
): Promise<CredentialResolutionResult> {
const mockedNodeNames: string[] = [];
const mockedCredentialTypesSet = new Set<string>();
const mockedCredentialsByNode: Record<string, string[]> = {};
const verificationPinData: Record<string, Array<Record<string, unknown>>> = {};
let usesWorkflowPinDataForVerification = false;
// Build a map of existing credentials by node name (for updates)
const existingCredsByNode = new Map<string, Record<string, unknown>>();
@ -149,7 +131,9 @@ export async function resolveCredentials(
// Produce sidecar verification pin data (never saved to workflow).
// If the workflow already has real pinData for this node, skip — the
// existing pinData will suffice for execution skipping.
if (!(json.pinData && nodeName in json.pinData)) {
if (json.pinData && nodeName in json.pinData) {
usesWorkflowPinDataForVerification = true;
} else {
verificationPinData[nodeName] ??= [];
if (verificationPinData[nodeName].length === 0) {
verificationPinData[nodeName].push({ _mockedCredential: key });
@ -174,17 +158,7 @@ export async function resolveCredentials(
continue;
}
// Try 2: look up by credential type from the map (fallback for new nodes).
// Note: the map only stores one credential per type, so when multiple
// credentials of the same type exist this is an arbitrary pick.
const fromMap = credentialMap.get(key);
if (fromMap) {
creds[key] = fromMap;
cleanupMockPinData(json, node.name);
continue;
}
// Try 3: Mock — remove the credential key and produce sidecar verification data.
// Mock — remove the credential key and produce sidecar verification data.
// The credential key is deleted so the saved workflow doesn't reference a
// non-existent credential. Verification pin data is produced so the execution
// engine can skip this node during test runs.
@ -201,6 +175,7 @@ export async function resolveCredentials(
mockedCredentialTypes: [...mockedCredentialTypesSet],
mockedCredentialsByNode,
verificationPinData,
usesWorkflowPinDataForVerification,
};
}

View File

@ -17,7 +17,7 @@
import { createTool } from '@mastra/core/tools';
import type { Workspace } from '@mastra/core/workspace';
import type { CredentialEntry, CredentialMap } from './resolve-credentials';
import type { CredentialEntry } from './resolve-credentials';
import {
createSubmitWorkflowTool,
resolveSandboxWorkflowFilePath,
@ -214,7 +214,6 @@ export function wrapSubmitExecuteWithIdentity(
export function createIdentityEnforcedSubmitWorkflowTool(args: {
context: InstanceAiContext;
workspace: Workspace;
credentialMap?: CredentialMap;
availableCredentials?: CredentialEntry[];
onAttempt: (attempt: SubmitWorkflowAttempt) => Promise<void> | void;
root: string;
@ -226,7 +225,6 @@ export function createIdentityEnforcedSubmitWorkflowTool(args: {
const underlying = createSubmitWorkflowTool(
args.context,
args.workspace,
args.credentialMap,
async (attempt) => {
await args.onAttempt(budgetTracker.recordAttempt(attempt));
},

View File

@ -14,12 +14,9 @@ import { validateWorkflow } from '@n8n/workflow-sdk';
import { createHash, randomUUID } from 'node:crypto';
import { z } from 'zod';
import {
resolveCredentials,
type CredentialEntry,
type CredentialMap,
} from './resolve-credentials';
import { resolveCredentials, type CredentialEntry } from './resolve-credentials';
import { stripStaleCredentialsFromWorkflow } from './setup-workflow.service';
import { getReferencedWorkflowIds, isTriggerNodeType } from './workflow-json-utils';
import type { InstanceAiContext } from '../../types';
import type { ValidationWarning } from '../../workflow-builder';
import { partitionWarnings } from '../../workflow-builder';
@ -28,6 +25,8 @@ import type { RemediationMetadata } from '../../workflow-loop/workflow-loop-stat
import { escapeSingleQuotes, readFileViaSandbox, runInSandbox } from '../../workspace/sandbox-fs';
import { getWorkspaceRoot } from '../../workspace/sandbox-setup';
export { getReferencedWorkflowIds, isTriggerNodeType };
export interface SubmitWorkflowAttempt {
filePath: string;
sourceHash: string;
@ -49,6 +48,10 @@ export interface SubmitWorkflowAttempt {
mockedCredentialsByNode?: Record<string, string[]>;
/** Verification-only pin data — scoped to this build, never persisted to workflow. */
verificationPinData?: Record<string, Array<Record<string, unknown>>>;
/** True when mocked credentials can be verified with saved workflow-level pin data. */
usesWorkflowPinDataForVerification?: boolean;
/** Workflow IDs referenced by Execute Workflow nodes in this submitted workflow. */
referencedWorkflowIds?: string[];
/** Whether any node parameters contain unresolved placeholder values. */
hasUnresolvedPlaceholders?: boolean;
remediation?: RemediationMetadata;
@ -69,32 +72,6 @@ const WEBHOOK_NODE_TYPES = new Set([
'@n8n/n8n-nodes-langchain.chatTrigger',
]);
/**
* Node types the bypassPlan post-build verify flow can exercise without user
* approval (verify-built-workflow injects sidecar pin data matching each
* trigger's production output shape). Kept in sync with the per-trigger
* inputData shape block in the orchestrator system prompt.
*/
const KNOWN_MOCKABLE_TRIGGER_TYPES = new Set([
'n8n-nodes-base.webhook',
'n8n-nodes-base.formTrigger',
'n8n-nodes-base.scheduleTrigger',
'@n8n/n8n-nodes-langchain.chatTrigger',
]);
/**
* Whether a node's type should be surfaced in `SubmitWorkflowAttempt.triggerNodes`
* so the orchestrator can decide if it can verify the build without user input.
* Known-mockable types feed the post-build verify step directly; other `*Trigger`
* suffix types are included for visibility but skipped by the verify step.
* Exported for direct unit coverage.
*/
export function isTriggerNodeType(nodeType: string | undefined): boolean {
if (!nodeType) return false;
if (KNOWN_MOCKABLE_TRIGGER_TYPES.has(nodeType)) return true;
return nodeType.endsWith('Trigger') || nodeType.endsWith('trigger');
}
/**
* Ensure webhook nodes have a webhookId so n8n registers clean URL paths.
* Without it, getNodeWebhookPath() falls back to encoding the node name
@ -167,9 +144,7 @@ function enhanceBuildErrors(errors: string[]): string[] {
// Re-export from shared module for backward compatibility
export {
buildCredentialMap,
resolveCredentials,
type CredentialMap,
type CredentialResolutionResult,
} from './resolve-credentials';
@ -198,6 +173,10 @@ export const submitWorkflowOutputSchema = z.object({
mockedCredentialsByNode: z.record(z.array(z.string())).optional(),
/** Verification-only pin data — scoped to this build, never persisted to workflow. */
verificationPinData: z.record(z.array(z.record(z.unknown()))).optional(),
/** True when mocked credentials can be verified with saved workflow-level pin data. */
usesWorkflowPinDataForVerification: z.boolean().optional(),
/** Workflow IDs referenced by Execute Workflow nodes in this submitted workflow. */
referencedWorkflowIds: z.array(z.string()).optional(),
remediation: z
.object({
category: z.enum(['code_fixable', 'needs_setup', 'blocked']),
@ -286,16 +265,13 @@ export function classifySubmitFailure(
export function createSubmitWorkflowTool(
context: InstanceAiContext,
workspace: Workspace,
credentialMap: CredentialMap = new Map(),
onAttempt?: (attempt: SubmitWorkflowAttempt) => void | Promise<void>,
availableCredentials?: CredentialEntry[],
) {
return createTool({
id: 'submit-workflow',
description:
'Submit a workflow from a TypeScript file in the sandbox. Reads the file, validates it, ' +
'and saves it to n8n as a draft. Publishing policy lives in the builder prompt ' +
'(main workflows wait for the user; sub-workflow chunks may be auto-published).',
'Submit a workflow from a TypeScript file in the sandbox. Reads the file, validates it, and saves it to n8n as a draft.',
inputSchema: submitWorkflowInputSchema,
outputSchema: submitWorkflowOutputSchema,
execute: async ({
@ -432,15 +408,9 @@ export function createSubmitWorkflowTool(
// Resolve undefined/null credentials before saving.
// newCredential() produces NewCredentialImpl which serializes to undefined in toJSON().
// For updates: restore from the existing workflow's resolved credentials.
// For new nodes: look up credentials by name from the credential service.
// Unresolved credentials are mocked via pinned data when available.
const mockResult = await resolveCredentials(
json,
workflowId,
context,
credentialMap,
availableCredentials,
);
// For new nodes: preserve explicit valid credentials; unresolved credentials
// are mocked with sidecar pin data for verification.
const mockResult = await resolveCredentials(json, workflowId, context, availableCredentials);
// Strip credential entries that are no longer valid for the current
// parameters. Resolution above (and the LLM itself) can re-emit stale
@ -504,6 +474,7 @@ export function createSubmitWorkflowTool(
// Scan node parameters for unresolved placeholder values
const hasPlaceholders = (json.nodes ?? []).some((n) => hasPlaceholderDeep(n.parameters));
const referencedWorkflowIds = getReferencedWorkflowIds(json);
await reportAttempt({
success: true,
@ -518,6 +489,9 @@ export function createSubmitWorkflowTool(
hasMockedCredentials && Object.keys(mockResult.verificationPinData).length > 0
? mockResult.verificationPinData
: undefined,
usesWorkflowPinDataForVerification:
hasMockedCredentials && mockResult.usesWorkflowPinDataForVerification ? true : undefined,
referencedWorkflowIds: referencedWorkflowIds.length > 0 ? referencedWorkflowIds : undefined,
hasUnresolvedPlaceholders: hasPlaceholders || undefined,
});
return {
@ -533,6 +507,9 @@ export function createSubmitWorkflowTool(
hasMockedCredentials && Object.keys(mockResult.verificationPinData).length > 0
? mockResult.verificationPinData
: undefined,
usesWorkflowPinDataForVerification:
hasMockedCredentials && mockResult.usesWorkflowPinDataForVerification ? true : undefined,
referencedWorkflowIds: referencedWorkflowIds.length > 0 ? referencedWorkflowIds : undefined,
warnings:
informational.length > 0
? informational.map((w) => `[${w.code}]: ${w.message}`)

View File

@ -0,0 +1,55 @@
import type { WorkflowJSON } from '@n8n/workflow-sdk';
/**
 * Trigger node types the post-build verify flow can exercise without user
 * approval, because sidecar pin data matching each trigger's output shape can
 * be injected. NOTE(review): the removed copy of this set documented that it
 * is kept in sync with the per-trigger inputData shape block in the
 * orchestrator system prompt — confirm that coupling still holds here.
 */
const KNOWN_MOCKABLE_TRIGGER_TYPES = new Set([
  'n8n-nodes-base.webhook',
  'n8n-nodes-base.formTrigger',
  'n8n-nodes-base.scheduleTrigger',
  '@n8n/n8n-nodes-langchain.chatTrigger',
]);
/** Runtime guard: true for any non-null object value (arrays included). */
function isRecord(value: unknown): value is Record<string, unknown> {
  return value !== null && typeof value === 'object';
}
/**
 * Whether `nodeType` is one of the fixed trigger types the verify flow can
 * mock (see KNOWN_MOCKABLE_TRIGGER_TYPES). Undefined never matches.
 */
export function isMockableTriggerNodeType(nodeType: string | undefined): boolean {
  if (nodeType === undefined) return false;
  return KNOWN_MOCKABLE_TRIGGER_TYPES.has(nodeType);
}
/**
 * Whether a node type should be treated as a trigger: either a known-mockable
 * type, or any type name ending in "Trigger"/"trigger". Empty or missing
 * types are not triggers.
 */
export function isTriggerNodeType(nodeType: string | undefined): boolean {
  if (isMockableTriggerNodeType(nodeType)) return true;
  if (nodeType === undefined) return false;
  return nodeType.endsWith('Trigger') || nodeType.endsWith('trigger');
}
/**
 * Extract a literal workflow ID from an Execute Workflow node's `workflowId`
 * parameter, which may be a bare string or a resource-locator-style object
 * with a `value` field. Returns undefined for non-string values, blank
 * strings, and expression values (leading "=").
 */
function extractWorkflowIdParameter(value: unknown): string | undefined {
  const raw =
    typeof value === 'object' && value !== null ? (value as { value?: unknown }).value : value;
  if (typeof raw !== 'string') return undefined;
  const trimmed = raw.trim();
  if (trimmed.length === 0 || trimmed.startsWith('=')) return undefined;
  return trimmed;
}
/**
 * Skip Execute Workflow references whose `source` parameter is an explicit
 * non-"database" string; a missing or non-string source is treated as
 * database-backed and kept.
 */
function shouldSkipReferencedWorkflow(source: unknown): boolean {
  if (typeof source !== 'string') return false;
  return source !== 'database';
}
/**
 * Collect the unique workflow IDs referenced by enabled Execute Workflow
 * nodes in the given workflow, in first-seen order. Disabled nodes, nodes
 * with a non-database `source`, and blank/expression `workflowId` values are
 * ignored.
 */
export function getReferencedWorkflowIds(json: WorkflowJSON): string[] {
  // Set preserves insertion order, so this dedupes while keeping first-seen order.
  const ids = new Set<string>();
  for (const node of json.nodes ?? []) {
    if (node.disabled) continue;
    if (node.type !== 'n8n-nodes-base.executeWorkflow') continue;
    const parameters = isRecord(node.parameters) ? node.parameters : {};
    if (shouldSkipReferencedWorkflow(parameters.source)) continue;
    const workflowId = extractWorkflowIdParameter(parameters.workflowId);
    if (workflowId !== undefined) ids.add(workflowId);
  }
  return [...ids];
}

View File

@ -9,6 +9,8 @@ export {
attemptResultSchema,
attemptRecordSchema,
triggerTypeSchema,
workflowVerificationReadinessSchema,
workflowSetupRequirementSchema,
workflowBuildOutcomeSchema,
verificationVerdictSchema,
verificationResultSchema,
@ -25,6 +27,9 @@ export type {
AttemptResult,
AttemptRecord,
TriggerType,
WorkflowVerificationEvidence,
WorkflowVerificationReadiness,
WorkflowSetupRequirement,
WorkflowBuildOutcome,
VerificationVerdict,
VerificationResult,

View File

@ -111,11 +111,42 @@ export const workflowVerificationEvidenceSchema = z.object({
export type WorkflowVerificationEvidence = z.infer<typeof workflowVerificationEvidenceSchema>;
/** Variant: the built workflow can be verified immediately. */
const verificationReadinessReady = z.object({ status: z.literal('ready') });

/** Variant: a prior verification already covers this build. */
const verificationReadinessAlreadyVerified = z.object({ status: z.literal('already_verified') });

/** Variant: verification is possible only after setup; `guidance` explains what to do. */
const verificationReadinessNeedsSetup = z.object({
  status: z.literal('needs_setup'),
  reason: z.enum([
    'unresolved-placeholders',
    'missing-mocked-credential-pin-data',
    'workflow-needs-setup',
  ]),
  guidance: z.string(),
});

/** Variant: this build cannot be verified at all; `guidance` explains why. */
const verificationReadinessNotVerifiable = z.object({
  status: z.literal('not_verifiable'),
  reason: z.enum(['not-submitted', 'missing-workflow-id', 'non-mockable-trigger']),
  guidance: z.string(),
});

/**
 * Deterministic post-build verification verdict, discriminated on `status`.
 * The orchestrator branches on this instead of re-deriving readiness itself.
 */
export const workflowVerificationReadinessSchema = z.discriminatedUnion('status', [
  verificationReadinessReady,
  verificationReadinessAlreadyVerified,
  verificationReadinessNeedsSetup,
  verificationReadinessNotVerifiable,
]);

export type WorkflowVerificationReadiness = z.infer<typeof workflowVerificationReadinessSchema>;
/** Variant: the saved workflow needs no further user setup. */
const setupNotRequired = z.object({ status: z.literal('not_required') });

/** Variant: user setup is required; `guidance` explains what is missing. */
const setupRequired = z.object({
  status: z.literal('required'),
  reason: z.enum(['mocked-credentials', 'unresolved-placeholders', 'workflow-needs-setup']),
  guidance: z.string(),
});

/** Deterministic setup handoff verdict, discriminated on `status`. */
export const workflowSetupRequirementSchema = z.discriminatedUnion('status', [
  setupNotRequired,
  setupRequired,
]);

export type WorkflowSetupRequirement = z.infer<typeof workflowSetupRequirementSchema>;
/**
* Structured trigger descriptor for each trigger node in the submitted workflow.
* The orchestrator uses `nodeType` to decide whether the bypassPlan post-build
* flow can invoke `verify-built-workflow` (mockable types) or must defer to a
* manual user test (polling / OAuth-bound triggers).
* The orchestrator uses `nodeType` only to shape verification input data.
* Whether verification is allowed is exposed through `verificationReadiness`.
*/
export const triggerNodeDescriptorSchema = z.object({
nodeName: z.string(),
@ -148,8 +179,19 @@ export const workflowBuildOutcomeSchema = z.object({
mockedCredentialsByNode: z.record(z.array(z.string())).optional(),
/** Verification-only pin data — scoped to this build, never persisted to workflow. */
verificationPinData: z.record(z.array(z.record(z.unknown()))).optional(),
/** True when mocked credentials can be verified with saved workflow-level pin data. */
usesWorkflowPinDataForVerification: z.boolean().optional(),
/** Draft sub-workflows created by the builder that must publish before the main workflow. */
supportingWorkflowIds: z.array(z.string()).optional(),
/** Whether any node parameters contain unresolved placeholder values. */
hasUnresolvedPlaceholders: z.boolean().optional(),
/**
* Deterministic post-build routing verdict. The orchestrator should use this
* instead of reasoning over pin-data internals or trigger allow-lists.
*/
verificationReadiness: workflowVerificationReadinessSchema.optional(),
/** Deterministic setup handoff verdict for post-verification workflow setup. */
setupRequirement: workflowSetupRequirementSchema.optional(),
remediation: remediationMetadataSchema.optional(),
/**
* Structured verification record from the most recent `verify-built-workflow`