Mirror of https://github.com/n8n-io/n8n.git (synced 2026-05-12 16:10:30 +02:00)

feat: Move utils to @n8n/ai-utilities, add openai handler (#25362)

parent 724d3cf857 · commit f2926d63e6
@@ -1,7 +1,7 @@
-import { defineConfig } from 'eslint/config';
+import { defineConfig, globalIgnores } from 'eslint/config';
 import { nodeConfig } from '@n8n/eslint-config/node';
 
-export default defineConfig(nodeConfig, {
+export default defineConfig(nodeConfig, globalIgnores(['scripts/**']), {
 	rules: {
 		'@typescript-eslint/no-explicit-any': 'warn',
 		'@typescript-eslint/no-unsafe-assignment': 'warn',

@@ -11,5 +11,6 @@ export default defineConfig(nodeConfig, {
 		'no-case-declarations': 'warn',
 		'@typescript-eslint/require-await': 'warn',
+		'@typescript-eslint/prefer-nullish-coalescing': 'warn',
 		'@typescript-eslint/naming-convention': 'warn',
 	},
 });

@@ -9,16 +9,10 @@ import {
 	type StreamChunk,
 	type Tool,
 	type ToolCall,
+	type TokenUsage,
 } from '../../src';
 import { parseSSEStream } from '../../src/utils/sse';
 
-// =============================================================================
-// OpenAI API Types
-// =============================================================================
-
-/**
- * OpenAI API tool definition
- */
 export type OpenAITool =
 	| {
 			type: 'function';

@@ -31,14 +25,8 @@ export type OpenAITool =
 			type: 'web_search';
 	  };
 
-/**
- * OpenAI API tool choice
- */
 export type OpenAIToolChoice = 'auto' | 'required' | 'none' | { type: 'function'; name: string };
 
-/**
- * OpenAI Responses API request body
- */
 export interface OpenAIResponsesRequest {
 	model: string;
 	input: string | ResponsesInputItem[];

@@ -54,9 +42,6 @@ export interface OpenAIResponsesRequest {
 	metadata?: Record<string, unknown>;
 }
 
-/**
- * OpenAI Responses API response
- */
 export interface OpenAIResponsesResponse {
 	id: string;
 	object: string;

@@ -81,9 +66,6 @@ export interface OpenAIResponsesResponse {
 	service_tier?: string;
 }
 
-/**
- * OpenAI Responses API output item
- */
 export type ResponsesOutputItem =
 	| {
 			type: 'message';

@@ -110,9 +92,6 @@ export type ResponsesOutputItem =
 	  }>;
 	};
 
-/**
- * OpenAI streaming event types
- */
 export interface OpenAIStreamEvent {
 	type: string;
 	delta?: string;

@@ -121,9 +100,6 @@ export interface OpenAIStreamEvent {
 	response?: Record<string, unknown>;
 }
 
-/**
- * OpenAI API error response
- */
 export interface OpenAIErrorResponse {
 	error: {
 		message: string;

@@ -133,19 +109,11 @@ export interface OpenAIErrorResponse {
 	};
 }
-
-// =============================================================================
-// HTTP Helper Functions
-// =============================================================================
-
-/**
- * Make a POST request to OpenAI API
- */
 async function openAIFetch(
 	url: string,
 	apiKey: string,
 	body: OpenAIResponsesRequest,
 ): Promise<OpenAIResponsesResponse> {
 	// Remove undefined values from request body
 	const cleanedBody = Object.fromEntries(
 		Object.entries(body).filter(([_, value]) => value !== undefined),
 	);

@@ -172,15 +140,11 @@ async function openAIFetch(
 	return (await response.json()) as OpenAIResponsesResponse;
 }
-
-/**
- * Make a streaming POST request to OpenAI API
- */
 async function openAIFetchStream(
 	url: string,
 	apiKey: string,
 	body: OpenAIResponsesRequest,
 ): Promise<ReadableStream<Uint8Array>> {
 	// Remove undefined values from request body
 	const cleanedBody = Object.fromEntries(
 		Object.entries(body).filter(([_, value]) => value !== undefined),
 	);

@@ -211,25 +175,17 @@ async function openAIFetchStream(
 	return response.body;
 }
 
-/**
- * Parse OpenAI streaming events from SSE stream
- * Uses the robust SSE parser and extracts OpenAI-specific event data
- */
 async function* parseOpenAIStreamEvents(
 	body: ReadableStream<Uint8Array>,
 ): AsyncIterable<OpenAIStreamEvent> {
 	for await (const message of parseSSEStream(body)) {
-		// OpenAI sends events in the data field
 		if (!message.data) continue;
-
-		// Skip [DONE] marker
 		if (message.data === '[DONE]') continue;
 
 		try {
 			const event = JSON.parse(message.data);
 			yield event as OpenAIStreamEvent;
 		} catch (e) {
-			// Skip invalid JSON - log warning in development
 			if (process.env.NODE_ENV !== 'production') {
 				console.warn('Failed to parse OpenAI SSE event:', message.data);
 			}
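
The hunk above strips the explanatory comments but keeps the SSE loop itself. As a hedged illustration of how `parseOpenAIStreamEvents` is meant to be consumed (the accumulator below is illustrative and not part of this commit; the event name follows the OpenAI Responses streaming API):

// Hypothetical consumer of the generator above.
async function collectText(body: ReadableStream<Uint8Array>): Promise<string> {
	let text = '';
	for await (const event of parseOpenAIStreamEvents(body)) {
		// Incremental text arrives as `response.output_text.delta` events
		if (event.type === 'response.output_text.delta' && event.delta) {
			text += event.delta;
		}
	}
	return text;
}
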
@@ -237,10 +193,6 @@ async function* parseOpenAIStreamEvents(
 	}
 }
 
-// =============================================================================
-// OpenAI Responses API – input/output conversion
-// =============================================================================
-
 type ResponsesInputItem =
 	| { role: 'user'; content: string }
 	| { role: 'user'; content: Array<{ type: 'input_text'; text: string }> }

@@ -257,10 +209,6 @@ type ResponsesInputItem =
 	  }
 	| { type: 'function_call_output'; call_id: string; output: string };
 
-/**
- * Convert N8nMessage[] to OpenAI Responses API input and instructions.
- * @see https://platform.openai.com/docs/api-reference/responses/create
- */
 function genericMessagesToResponsesInput(messages: Message[]): {
 	instructions?: string;
 	input: string | ResponsesInputItem[];

@@ -291,7 +239,6 @@ function genericMessagesToResponsesInput(messages: Message[]): {
 
 	if (msg.role === 'ai') {
 		for (const contentPart of msg.content) {
-			// Otherwise reconstruct from message content
 			if (contentPart.type === 'text') {
 				inputItems.push({
 					type: 'message',

@@ -360,9 +307,6 @@ function genericMessagesToResponsesInput(messages: Message[]): {
 	return { instructions, input: inputItems };
 }
 
-/**
- * Convert N8nTool to OpenAI Responses API function tool format.
- */
 function genericToolToResponsesTool(tool: Tool): OpenAITool {
 	if (tool.type === 'provider') {
 		if (tool.name === 'web_search') {

@@ -383,9 +327,6 @@ function genericToolToResponsesTool(tool: Tool): OpenAITool {
 	};
 }
 
-/**
- * Parse Responses API output array into text and tool calls.
- */
 function parseResponsesOutput(output: unknown[]): {
 	text: string;
 	toolCalls: ToolCall[];
@@ -421,32 +362,33 @@ function parseResponsesOutput(output: unknown[]): {
 	return { text, toolCalls };
 }
 
-// =============================================================================
-// OpenAI Chat Model (Responses API)
-// =============================================================================
+function parseTokenUsage(
+	usage: OpenAIResponsesResponse['usage'] | undefined,
+): TokenUsage | undefined {
+	return usage
+		? {
+				promptTokens: usage.input_tokens ?? 0,
+				completionTokens: usage.output_tokens ?? 0,
+				totalTokens: usage.total_tokens ?? 0,
+				inputTokenDetails: {
+					...(!!usage.input_tokens_details?.cached_tokens && {
+						cacheRead: usage.input_tokens_details.cached_tokens,
+					}),
+				},
+				outputTokenDetails: {
+					...(!!usage.output_tokens_details?.reasoning_tokens && {
+						reasoning: usage.output_tokens_details.reasoning_tokens,
+					}),
+				},
+			}
+		: undefined;
+}
 
 export interface OpenAIChatModelConfig extends ChatModelConfig {
-	/**
-	 * OpenAI API key (defaults to process.env.OPENAI_API_KEY)
-	 */
 	apiKey?: string;
 
-	/**
-	 * Base URL for the API (optional, for proxies)
-	 */
 	baseURL?: string;
 }
-
-/**
- * N8n chat model implementation using the OpenAI Responses API.
- * Supports text, tools (function calling), and streaming.
- *
- * Note: This model does NOT execute tools automatically. When tool calls are
- * returned by the model, they are passed to the framework (e.g., LangChain)
- * which handles tool execution via its agent loop.
- *
- * @see https://platform.openai.com/docs/api-reference/responses/create
- */
 export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 	private apiKey: string;
 	private baseURL: string;
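
`parseTokenUsage` above centralizes the usage mapping that the next hunks delete from the generate and stream paths. A worked example with hypothetical counts:

// Hypothetical usage payload -> unified TokenUsage (values illustrative).
const usage = parseTokenUsage({
	input_tokens: 120,
	output_tokens: 48,
	total_tokens: 168,
	input_tokens_details: { cached_tokens: 100 },
	output_tokens_details: { reasoning_tokens: 0 },
});
// => promptTokens: 120, completionTokens: 48, totalTokens: 168,
//    inputTokenDetails: { cacheRead: 100 }, outputTokenDetails: {}
// Note the truthiness guard: a reasoning count of 0 is omitted entirely.
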
@@ -482,25 +424,8 @@ export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 
 		const { text, toolCalls } = parseResponsesOutput(response.output as unknown[]);
 
-		const usage = response.usage
-			? {
-					promptTokens: response.usage.input_tokens ?? 0,
-					completionTokens: response.usage.output_tokens ?? 0,
-					totalTokens: response.usage.total_tokens ?? 0,
-					input_token_details: {
-						...(!!response.usage.input_tokens_details?.cached_tokens && {
-							cache_read: response.usage.input_tokens_details.cached_tokens,
-						}),
-					},
-					output_token_details: {
-						...(!!response.usage.output_tokens_details?.reasoning_tokens && {
-							reasoning: response.usage.output_tokens_details.reasoning_tokens,
-						}),
-					},
-				}
-			: undefined;
+		const usage = parseTokenUsage(response.usage);
 
 		// Build response metadata
 		const responseMetadata: Record<string, unknown> = {
 			model_provider: 'openai',
 			model: response.model,

@@ -513,11 +438,9 @@ export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 			user: response.user,
 			service_tier: response.service_tier,
-			model_name: response.model,
-			// Store raw output for reconstructing messages later
 			output: response.output,
 		};
 
 		// Parse output for reasoning and other content
 		for (const item of response.output as unknown[]) {
 			const o = item as Record<string, unknown>;
 			if (o.type === 'reasoning') {

@@ -525,7 +448,6 @@ export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 			}
 		}
 
-		// Create the message object
 		const message: Message = {
 			role: 'ai',
 			content: [{ type: 'text', text }],

@@ -571,7 +493,6 @@ export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 
 		const toolCallBuffers: Record<number, { name: string; arguments: string }> = {};
 
-		// Parse SSE stream
 		for await (const event of parseOpenAIStreamEvents(streamBody)) {
 			const type = event.type;
 

@@ -591,7 +512,6 @@ export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 					arguments: (item.arguments as string) ?? '',
 				};
 			}
-			// Handle reasoning items
 			if (item?.type === 'reasoning') {
 				const summary = (item.summary as Array<Record<string, unknown>>) ?? [];
 				const reasoningText = summary

@@ -641,17 +561,10 @@ export class OpenAIChatModel extends BaseChatModel<OpenAIChatModelConfig> {
 			const responseData =
 				(event.response as unknown as OpenAIResponsesResponse) ??
 				(event as unknown as OpenAIResponsesResponse);
-			const usage = responseData.usage;
 			yield {
 				type: 'finish',
 				finishReason: 'stop',
-				usage: usage
-					? {
-							promptTokens: usage.input_tokens ?? 0,
-							completionTokens: usage.output_tokens ?? 0,
-							totalTokens: usage.total_tokens ?? 0,
-						}
-					: undefined,
+				usage: parseTokenUsage(responseData.usage),
 			};
 		}
 	}

@@ -3,7 +3,7 @@ import { createAgent, HumanMessage, tool } from 'langchain';
 import z from 'zod';
 
 import { OpenAIChatModel } from './models/openai';
-import { supplyModel } from '../src/suppliers/supplyModel';
+import { LangchainAdapter } from '../src/adapters/langchain-chat-model';
 
 dotenv.config();
 

@@ -116,7 +116,7 @@ async function main() {
 		const openaiChatModel = new OpenAIChatModel('gpt-4o', {
 			apiKey: process.env.OPENAI_API_KEY,
 		});
-		chatModel = supplyModel(openaiChatModel).response;
+		chatModel = new LangchainAdapter(openaiChatModel);
 	} else {
 		throw new Error(`Unsupported model: ${model}`);
 	}
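
The example now constructs the LangChain bridge directly; `supplyModel` gains an `ISupplyDataFunctions` parameter in this commit (see the suppliers hunk below), so it no longer fits a standalone script. A sketch of the rewired flow, assuming `createAgent` accepts the adapter as its model (per the example's own imports) and `OPENAI_API_KEY` is set:

import { createAgent, HumanMessage } from 'langchain';

import { OpenAIChatModel } from './models/openai';
import { LangchainAdapter } from '../src/adapters/langchain-chat-model';

const chatModel = new LangchainAdapter(
	new OpenAIChatModel('gpt-4o', { apiKey: process.env.OPENAI_API_KEY }),
);
const agent = createAgent({ model: chatModel, tools: [] });
// Invocation shape assumed from LangChain's agent API:
const result = await agent.invoke({ messages: [new HumanMessage('Hello!')] });
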

@@ -10,7 +10,8 @@
 		"clean": "rimraf dist .turbo",
 		"dev": "pnpm run watch",
 		"typecheck": "tsc --noEmit",
-		"build": "tsc --build tsconfig.build.json && tsc-alias -p tsconfig.build.json",
+		"copy-tokenizer-json": "node scripts/copy-tokenizer-json.js .",
+		"build": "tsc --build tsconfig.build.json && tsc-alias -p tsconfig.build.json && pnpm copy-tokenizer-json",
 		"format": "biome format --write .",
 		"format:check": "biome ci .",
 		"lint": "eslint . --quiet",

@@ -41,6 +42,10 @@
 		"@n8n/config": "workspace:*",
 		"@n8n/typescript-config": "workspace:*",
 		"n8n-workflow": "workspace:*",
-		"tmp-promise": "3.0.3"
+		"tmp-promise": "3.0.3",
+		"js-tiktoken": "catalog:",
+		"https-proxy-agent": "catalog:",
+		"proxy-from-env": "^1.1.0",
+		"undici": "^6.21.0"
 	}
 }

@@ -9,12 +9,12 @@ function copyTokenizerJsonFiles(baseDir) {
 		fs.mkdirSync(targetDir, { recursive: true });
 	}
 	// Copy all tokenizer JSON files
-	const files = glob.sync('utils/tokenizer/*.json', { cwd: baseDir });
+	const files = glob.sync('src/utils/tokenizer/*.json', { cwd: baseDir });
 	for (const file of files) {
 		const sourcePath = path.resolve(baseDir, file);
-		const targetPath = path.resolve(baseDir, 'dist', file);
+		const targetPath = path.resolve(baseDir, 'dist', file.replace('src/', ''));
 		fs.copyFileSync(sourcePath, targetPath);
-		console.log(`Copied: ${file} -> dist/${file}`);
+		console.log(`Copied: ${file} -> ${targetPath.replace(baseDir, '')}`);
 	}
 }
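
The tokenizer JSON files moved under `src/`, so the script now strips that prefix when computing the destination, keeping the published `dist/` layout unchanged. An illustration with a hypothetical file name:

import path from 'node:path';

// Hypothetical values; mirrors the rewritten mapping above.
const baseDir = '/repo/packages/@n8n/ai-utilities';
const file = 'src/utils/tokenizer/cl100k_base.json';
const targetPath = path.resolve(baseDir, 'dist', file.replace('src/', ''));
// => /repo/packages/@n8n/ai-utilities/dist/utils/tokenizer/cl100k_base.json
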

@@ -4,22 +4,49 @@ import type { BindToolsInput } from '@langchain/core/language_models/chat_models
 import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { BaseMessage, ContentBlock } from '@langchain/core/messages';
 import { AIMessage, AIMessageChunk } from '@langchain/core/messages';
-import type { ChatResult } from '@langchain/core/outputs';
+import type { ChatResult, LLMResult } from '@langchain/core/outputs';
 import { ChatGenerationChunk } from '@langchain/core/outputs';
 import type { Runnable } from '@langchain/core/runnables';
+import type { ISupplyDataFunctions } from 'n8n-workflow';
 
 import { fromLcMessage } from '../converters/message';
 import { fromLcTool } from '../converters/tool';
 import type { ChatModel, ChatModelConfig } from '../types/chat-model';
+import { makeN8nLlmFailedAttemptHandler } from '../utils/failed-attempt-handler/n8nLlmFailedAttemptHandler';
+import { N8nLlmTracing } from '../utils/n8n-llm-tracing';
 
 export class LangchainAdapter<
 	CallOptions extends ChatModelConfig = ChatModelConfig,
 > extends BaseChatModel<CallOptions> {
-	constructor(private chatModel: ChatModel) {
-		super({
-			// TODO: Move N8nLlmTracing to ai-utilities
-			// callbacks: [new N8nLlmTracing(this)],
-		});
+	constructor(
+		private chatModel: ChatModel,
+		private ctx?: ISupplyDataFunctions,
+	) {
+		const params = {
+			...(ctx
+				? {
+						callbacks: [
+							new N8nLlmTracing(ctx, {
+								tokensUsageParser: (result: LLMResult) => {
+									const tokenUsage = result?.llmOutput?.tokenUsage as
+										| AIMessage['usage_metadata']
+										| undefined;
+									const completionTokens = (tokenUsage?.output_tokens as number) ?? 0;
+									const promptTokens = (tokenUsage?.input_tokens as number) ?? 0;
+
+									return {
+										completionTokens,
+										promptTokens,
+										totalTokens: completionTokens + promptTokens,
+									};
+								},
+							}),
+						],
+						onFailedAttempt: makeN8nLlmFailedAttemptHandler(ctx),
+					}
+				: {}),
+		};
+		super(params);
 	}
 
 	_llmType(): string {

@@ -48,8 +75,16 @@ export class LangchainAdapter<
 					input_tokens: result.usage.promptTokens ?? 0,
 					output_tokens: result.usage.completionTokens ?? 0,
 					total_tokens: result.usage.totalTokens ?? 0,
-					input_token_details: result.usage.input_token_details,
-					output_token_details: result.usage.output_token_details,
+					input_token_details: result.usage.inputTokenDetails
+						? {
+								cache_read: result.usage.inputTokenDetails.cacheRead,
+							}
+						: undefined,
+					output_token_details: result.usage.outputTokenDetails
+						? {
+								reasoning: result.usage.outputTokenDetails.reasoning,
+							}
+						: undefined,
 				}
 			: undefined;
 

@@ -84,7 +119,7 @@ export class LangchainAdapter<
 			],
 			llmOutput: {
 				id: result.id,
-				estimatedTokenUsage: usage_metadata,
+				tokenUsage: usage_metadata,
 			},
 		};
 	}

@@ -182,7 +217,7 @@ export class LangchainAdapter<
 	): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions> {
 		const genericTools = tools.map(fromLcTool);
 		const newModel = this.chatModel.withTools(genericTools);
-		const newAdapter = new LangchainAdapter(newModel);
+		const newAdapter = new LangchainAdapter(newModel, this.ctx);
 
 		return newAdapter as any;
 	}
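
With the constructor change above, tracing attaches only when a node context is supplied, and the `tokensUsageParser` reads the snake_case `usage_metadata` that `_generate` now stores under `llmOutput.tokenUsage` (renamed from `estimatedTokenUsage` in the same file). A sketch of the value it receives, with hypothetical counts:

// Shape handed to the tokensUsageParser above (illustrative numbers).
const llmResult = {
	generations: [],
	llmOutput: {
		tokenUsage: { input_tokens: 10, output_tokens: 5, total_tokens: 15 },
	},
};
// parser result: { completionTokens: 5, promptTokens: 10, totalTokens: 15 }
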

@@ -29,12 +29,6 @@ export abstract class BaseChatModel<TConfig extends ChatModelConfig = ChatModelC
 		return newInstance;
 	}
 
-	/**
-	 * Get all bound tools
-	 */
-	getTools(): Tool[] {
-		return [...this.tools];
-	}
 	/**
 	 * Merge configuration with defaults
 	 */

@@ -39,7 +39,7 @@ export function fromLcTool(tool: LangchainChatModels.BindToolsInput): N8nTools.T
 		};
 	}
 	if ('function' in tool && 'type' in tool && tool.type === 'function') {
-		const functionTool = tool as FunctionDefinition;
+		const functionTool = tool.function as FunctionDefinition;
 		return {
 			type: 'function',
 			name: functionTool.name,
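
The one-line fix above matters because an OpenAI-format tool nests its definition under a `function` key; casting the outer object produced a tool with an undefined name. A sketch with a hypothetical tool:

// OpenAI-style BindToolsInput (example tool is hypothetical):
const lcTool = {
	type: 'function' as const,
	function: {
		name: 'get_weather',
		description: 'Look up current weather for a city',
		parameters: { type: 'object', properties: { city: { type: 'string' } } },
	},
};
// Before: (lcTool as FunctionDefinition).name          -> undefined
// After:  (lcTool.function as FunctionDefinition).name -> 'get_weather'
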

@@ -6,9 +6,23 @@ export {
 	validateEmbedQueryInput,
 	validateEmbedDocumentsInput,
 } from './utils/embeddings-input-validation';
-export { getMetadataFiltersValues } from './utils/helpers';
+export { getMetadataFiltersValues, hasLongSequentialRepeat } from './utils/helpers';
 export { N8nBinaryLoader } from './utils/n8n-binary-loader';
 export { N8nJsonLoader } from './utils/n8n-json-loader';
+export { N8nLlmTracing } from './utils/n8n-llm-tracing';
+export {
+	estimateTokensFromStringList,
+	estimateTokensByCharCount,
+	estimateTextSplitsByTokens,
+} from './utils/tokenizer/token-estimator';
+export { encodingForModel, getEncoding } from './utils/tokenizer/tiktoken';
+export { makeN8nLlmFailedAttemptHandler } from './utils/failed-attempt-handler/n8nLlmFailedAttemptHandler';
+export {
+	getProxyAgent,
+	getNodeProxyAgent,
+	proxyFetch,
+	type AgentTimeoutOptions,
+} from './utils/http-proxy-agent';
 
 // Type guards
 export {

@@ -20,8 +34,8 @@ export {
 
 // Types
 export type { ChatModel, ChatModelConfig } from './types/chat-model';
-export type { GenerateResult, StreamChunk } from './types/output';
-export type { Tool, ToolResult, ToolCall } from './types/tool';
+export type { GenerateResult, StreamChunk, TokenUsage, FinishReason } from './types/output';
+export type { Tool, ToolResult, ToolCall, ProviderTool } from './types/tool';
 export type {
 	Message,
 	ContentFile,

@@ -36,7 +50,7 @@ export type {
 export type { JSONArray, JSONObject, JSONValue } from './types/json';
 export type { ServerSentEventMessage } from './utils/sse';
 
-export { LangchainAdapter } from './adapters/langchain';
+export { LangchainAdapter } from './adapters/langchain-chat-model';
 
 export { BaseChatModel } from './chat-model/base';

@@ -1,8 +1,94 @@
-import { LangchainAdapter } from '../adapters/langchain';
-import type { ChatModel } from '../types/chat-model';
-
-export function supplyModel(model: ChatModel) {
-	const adapter = new LangchainAdapter(model);
+import type { ServerTool } from '@langchain/core/tools';
+import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
+import type { ISupplyDataFunctions } from 'n8n-workflow';
+
+import { LangchainAdapter } from '../adapters/langchain-chat-model';
+import { BaseChatModel } from '../chat-model/base';
+import type { ChatModel } from '../types/chat-model';
+import type { OpenAIModelOptions } from '../types/openai';
+import { makeN8nLlmFailedAttemptHandler } from '../utils/failed-attempt-handler/n8nLlmFailedAttemptHandler';
+import { getProxyAgent } from '../utils/http-proxy-agent';
+import { N8nLlmTracing } from '../utils/n8n-llm-tracing';
+
+type OpenAiModel = OpenAIModelOptions & {
+	type: 'openai';
+};
+type ModelOptions = ChatModel | OpenAiModel;
+
+function isOpenAiModel(model: ModelOptions): model is OpenAiModel {
+	return 'type' in model && model.type === 'openai' && !(model instanceof BaseChatModel);
+}
+
+function getOpenAiModel(ctx: ISupplyDataFunctions, model: OpenAiModel) {
+	const clientConfiguration: ClientOptions = {
+		baseURL: model.baseUrl,
+	};
+
+	if (model.defaultHeaders) {
+		clientConfiguration.defaultHeaders = model.defaultHeaders;
+	}
+
+	const timeout = model.timeout;
+	clientConfiguration.fetchOptions = {
+		dispatcher: getProxyAgent(model.baseUrl, {
+			headersTimeout: timeout,
+			bodyTimeout: timeout,
+		}),
+	};
+
+	const openAiModel = new ChatOpenAI({
+		configuration: clientConfiguration,
+		model: model.model,
+		apiKey: model.apiKey,
+		useResponsesApi: model.useResponsesApi,
+		logprobs: model.logprobs,
+		topLogprobs: model.topLogprobs,
+		supportsStrictToolCalling: model.supportsStrictToolCalling,
+		reasoning: model.reasoning,
+		zdrEnabled: model.zdrEnabled,
+		service_tier: model.service_tier,
+		promptCacheKey: model.promptCacheKey,
+		temperature: model.temperature,
+		topP: model.topP,
+		frequencyPenalty: model.frequencyPenalty,
+		presencePenalty: model.presencePenalty,
+		stopSequences: model.stopSequences,
+		maxRetries: model.maxRetries,
+		modelKwargs: model.additionalParams,
+		verbosity: model.verbosity,
+		streaming: model.streaming,
+		streamUsage: model.streamUsage,
+		stop: model.stop,
+		maxTokens: model.maxTokens,
+		maxCompletionTokens: model.maxCompletionTokens,
+		callbacks: [new N8nLlmTracing(ctx)],
+		onFailedAttempt: makeN8nLlmFailedAttemptHandler(ctx, model.onFailedAttempt),
+	});
+
+	if (model.providerTools?.length) {
+		openAiModel.metadata = {
+			...openAiModel.metadata,
+			// Tools in metadata are read by ToolAgent and added to a list of all agent tools.
+			tools: model.providerTools.map<ServerTool>((tool) => ({
+				// openai format requires type to be the name of the tool
+				// langchain simply passes the tool object to openai as is
+				type: tool.name,
+				...tool.args,
+			})),
+		};
+	}
+
+	return openAiModel;
+}
+
+export function supplyModel(ctx: ISupplyDataFunctions, model: ModelOptions) {
+	if (isOpenAiModel(model)) {
+		const openAiModel = getOpenAiModel(ctx, model);
+		return {
+			response: openAiModel,
+		};
+	}
+	const adapter = new LangchainAdapter(model, ctx);
 	return {
 		response: adapter,
 	};
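
`supplyModel` now takes the node context first and special-cases plain OpenAI option objects, returning a `ChatOpenAI` wired with tracing, proxying, and failed-attempt handling; generic `ChatModel` instances still go through `LangchainAdapter`. A sketch of both call shapes (ctx would come from an n8n node's supplyData; all option values are hypothetical):

import type { ISupplyDataFunctions } from 'n8n-workflow';
import { supplyModel } from '../suppliers/supplyModel';

declare const ctx: ISupplyDataFunctions;

// Declarative OpenAI options are routed to a traced, proxied ChatOpenAI:
const { response } = supplyModel(ctx, {
	type: 'openai',
	model: 'gpt-4o-mini',
	apiKey: 'sk-...',
	baseUrl: 'https://api.openai.com/v1',
	temperature: 0,
});

// Any generic ChatModel instance still goes through LangchainAdapter:
// const { response } = supplyModel(ctx, myChatModel);
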

@@ -62,11 +62,6 @@ export interface ChatModelConfig {
 	 * Additional HTTP headers
 	 */
 	headers?: Record<string, string | undefined>;
-
-	/**
-	 * Provider-specific options
-	 */
-	providerOptions?: Record<string, unknown>;
 }
 
 export interface ChatModel<TConfig extends ChatModelConfig = ChatModelConfig> {

packages/@n8n/ai-utilities/src/types/openai.ts (new file, 142 lines)

@@ -0,0 +1,142 @@
+import type { ProviderTool } from './tool';
+
+export type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | null;
+export type VerbosityParam = 'low' | 'medium' | 'high' | null;
+
+export interface OpenAIModelOptions {
+	baseUrl: string;
+	/** Model name to use */
+	model: string;
+	/**
+	 * API key to use when making requests to OpenAI.
+	 */
+	apiKey: string;
+	/**
+	 * Provider-specific tools to use.
+	 * @example
+	 * {
+	 *   type: 'provider',
+	 *   name: 'web_search',
+	 *   args: {
+	 *     search_context_size: 'medium',
+	 *     userLocation: {
+	 *       type: "approximate",
+	 *       country: "US"
+	 *     },
+	 *   },
+	 * }
+	 */
+	providerTools?: ProviderTool[];
+	defaultHeaders?: Record<string, string>;
+	/**
+	 * Whether to use the responses API for all requests. If `false` the responses API will be used
+	 * only when required in order to fulfill the request.
+	 */
+	useResponsesApi?: boolean;
+	/**
+	 * Whether to return log probabilities of the output tokens or not.
+	 * If true, returns the log probabilities of each output token returned in the content of message.
+	 */
+	logprobs?: boolean;
+	/**
+	 * An integer between 0 and 5 specifying the number of most likely tokens to return at each token position,
+	 * each with an associated log probability. logprobs must be set to true if this parameter is used.
+	 */
+	topLogprobs?: number;
+	/**
+	 * Whether the model supports the `strict` argument when passing in tools.
+	 * If `undefined` the `strict` argument will not be passed to OpenAI.
+	 */
+	supportsStrictToolCalling?: boolean;
+
+	reasoning?: {
+		effort?: ReasoningEffort | null;
+		summary?: 'auto' | 'concise' | 'detailed' | null;
+	};
+
+	/**
+	 * Should be set to `true` in tenancies with Zero Data Retention
+	 * @see https://platform.openai.com/docs/guides/your-data
+	 *
+	 * @default false
+	 */
+	zdrEnabled?: boolean;
+
+	/**
+	 * Service tier to use for this request. Can be "auto", "default", or "flex" or "priority".
+	 * Specifies the service tier for prioritization and latency optimization.
+	 */
+	service_tier?: 'auto' | 'default' | 'flex' | 'scale' | 'priority' | null;
+
+	/**
+	 * Used by OpenAI to cache responses for similar requests to optimize your cache
+	 * hit rates. Replaces the `user` field.
+	 * [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+	 */
+	promptCacheKey?: string;
+
+	/** Sampling temperature to use */
+	temperature?: number;
+	/**
+	 * Maximum number of tokens to generate in the completion. -1 returns as many
+	 * tokens as possible given the prompt and the model's maximum context size.
+	 */
+	maxTokens?: number;
+	/**
+	 * Maximum number of tokens to generate in the completion. -1 returns as many
+	 * tokens as possible given the prompt and the model's maximum context size.
+	 * Alias for `maxTokens` for reasoning models.
+	 */
+	maxCompletionTokens?: number;
+	/** Total probability mass of tokens to consider at each step */
+	topP?: number;
+	/** Penalizes repeated tokens according to frequency */
+	frequencyPenalty?: number;
+	/** Penalizes repeated tokens */
+	presencePenalty?: number;
+	/** Number of completions to generate for each prompt */
+	n?: number;
+	/** Dictionary used to adjust the probability of specific tokens being generated */
+	logitBias?: Record<string, number>;
+	/** Unique string identifier representing your end-user, which can help OpenAI to monitor and detect abuse. */
+	user?: string;
+	/** Whether to stream the results or not. Enabling disables tokenUsage reporting */
+	streaming?: boolean;
+	/**
+	 * Whether or not to include token usage data in streamed chunks.
+	 * @default true
+	 */
+	streamUsage?: boolean;
+
+	/** Holds any additional parameters that are valid to pass to {@link
+	 * https://platform.openai.com/docs/api-reference/completions/create |
+	 * `openai.createCompletion`} that are not explicitly specified on this interface
+	 */
+	additionalParams?: Record<string, unknown>;
+	/**
+	 * List of stop words to use when generating
+	 * Alias for `stopSequences`
+	 */
+	stop?: string[];
+	/** List of stop words to use when generating */
+	stopSequences?: string[];
+	/**
+	 * Timeout to use when making requests to OpenAI.
+	 */
+	timeout?: number;
+	/**
+	 * The verbosity of the model's response.
+	 */
+	verbosity?: VerbosityParam;
+	/**
+	 * Maximum number of retries to attempt.
+	 */
+	maxRetries?: number;
+
+	/**
+	 * Custom handler to handle failed attempts. Takes the originally thrown
+	 * error object as input, and should itself throw an error if the input
+	 * error is not retryable.
+	 */
+	onFailedAttempt?: (error: unknown) => void;
+}
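
A filled-in example of the new options type, assuming the `ProviderTool` shape from the JSDoc example above; every value below is hypothetical:

import type { OpenAIModelOptions } from './openai';

const options: OpenAIModelOptions = {
	baseUrl: 'https://api.openai.com/v1',
	model: 'gpt-4o',
	apiKey: 'sk-...',
	useResponsesApi: true,
	reasoning: { effort: 'low', summary: 'auto' },
	service_tier: 'auto',
	providerTools: [
		{ type: 'provider', name: 'web_search', args: { search_context_size: 'medium' } },
	],
	timeout: 60_000,
	maxRetries: 2,
};
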

@@ -1,21 +1,26 @@
 import type { Message } from './message';
 import type { ToolCall } from './tool';
 
+export type FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
+
+export type TokenUsage<T extends Record<string, unknown> = Record<string, unknown>> = {
+	promptTokens: number;
+	completionTokens: number;
+	totalTokens: number;
+	inputTokenDetails?: {
+		cacheRead?: number;
+	};
+	outputTokenDetails?: {
+		reasoning?: number;
+	};
+	additionalMetadata?: T;
+};
+
 export interface GenerateResult {
 	id?: string;
 	text: string;
-	finishReason?: 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';
-	usage?: {
-		promptTokens: number;
-		completionTokens: number;
-		totalTokens: number;
-		input_token_details?: {
-			cache_read?: number;
-		};
-		output_token_details?: {
-			reasoning?: number;
-		};
-	};
+	finishReason?: FinishReason;
+	usage?: TokenUsage;
 	/**
 	 * Tool calls made by the model
 	 */

@@ -39,11 +44,7 @@ export interface StreamChunk {
 		name?: string;
 		argumentsDelta?: string;
 	};
-	finishReason?: string;
-	usage?: {
-		promptTokens: number;
-		completionTokens: number;
-		totalTokens: number;
-	};
+	finishReason?: FinishReason;
+	usage?: TokenUsage;
 	error?: unknown;
 }
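
The shared `TokenUsage` type replaces two divergent inline shapes: the snake_case details object in `GenerateResult` and the bare three-field object in `StreamChunk`. An example value in the unified camelCase shape (hypothetical counts):

import type { TokenUsage } from './output';

const usage: TokenUsage = {
	promptTokens: 120,
	completionTokens: 48,
	totalTokens: 168,
	inputTokenDetails: { cacheRead: 100 },
	outputTokenDetails: { reasoning: 16 },
};
// Adapters translate back to snake_case (cache_read, reasoning) at the
// LangChain boundary, as in the langchain-chat-model hunk above.
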

@@ -23,7 +23,7 @@ export const makeN8nLlmFailedAttemptHandler = (
 			n8nDefaultFailedAttemptHandler(error);
 		} catch (e) {
 			// Wrap the error in a NodeApiError
-			const apiError = new NodeApiError(ctx.getNode(), e as unknown as JsonObject, {
+			const apiError = new NodeApiError(ctx.getNode(), e as JsonObject, {
 				functionality: 'configuration-node',
 			});
 

packages/@n8n/ai-utilities/src/utils/helpers.test.ts (new file, 105 lines)

@@ -0,0 +1,105 @@
+import { hasLongSequentialRepeat } from './helpers';
+
+describe('hasLongSequentialRepeat', () => {
+	it('should return false for text shorter than threshold', () => {
+		const text = 'a'.repeat(99);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(false);
+	});
+
+	it('should return false for normal text without repeats', () => {
+		const text = 'This is a normal text without many sequential repeating characters.';
+		expect(hasLongSequentialRepeat(text)).toBe(false);
+	});
+
+	it('should return true for text with exactly threshold repeats', () => {
+		const text = 'a'.repeat(100);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should return true for text with more than threshold repeats', () => {
+		const text = 'b'.repeat(150);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should detect repeats in the middle of text', () => {
+		const text = 'Normal text ' + 'x'.repeat(100) + ' more normal text';
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should detect repeats at the end of text', () => {
+		const text = 'Normal text at the beginning' + 'z'.repeat(100);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should work with different thresholds', () => {
+		const text = 'a'.repeat(50);
+		expect(hasLongSequentialRepeat(text, 30)).toBe(true);
+		expect(hasLongSequentialRepeat(text, 60)).toBe(false);
+	});
+
+	it('should handle special characters', () => {
+		const text = '.'.repeat(100);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should handle spaces', () => {
+		const text = ' '.repeat(100);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should handle newlines', () => {
+		const text = '\n'.repeat(100);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	it('should not detect non-sequential repeats', () => {
+		const text = 'ababab'.repeat(50); // 300 chars but no sequential repeats
+		expect(hasLongSequentialRepeat(text, 100)).toBe(false);
+	});
+
+	it('should handle mixed content with repeats below threshold', () => {
+		const text = 'aaa' + 'b'.repeat(50) + 'ccc' + 'd'.repeat(40) + 'eee';
+		expect(hasLongSequentialRepeat(text, 100)).toBe(false);
+	});
+
+	it('should handle empty string', () => {
+		expect(hasLongSequentialRepeat('', 100)).toBe(false);
+	});
+
+	it('should work with very large texts', () => {
+		const normalText = 'Lorem ipsum dolor sit amet '.repeat(1000);
+		const textWithRepeat = normalText + 'A'.repeat(100) + normalText;
+		expect(hasLongSequentialRepeat(textWithRepeat, 100)).toBe(true);
+	});
+
+	it('should detect unicode character repeats', () => {
+		const text = '😀'.repeat(100);
+		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
+	});
+
+	describe('error handling', () => {
+		it('should handle null input', () => {
+			expect(hasLongSequentialRepeat(null as unknown as string)).toBe(false);
+		});
+
+		it('should handle undefined input', () => {
+			expect(hasLongSequentialRepeat(undefined as unknown as string)).toBe(false);
+		});
+
+		it('should handle non-string input', () => {
+			expect(hasLongSequentialRepeat(123 as unknown as string)).toBe(false);
+			expect(hasLongSequentialRepeat({} as unknown as string)).toBe(false);
+			expect(hasLongSequentialRepeat([] as unknown as string)).toBe(false);
+		});
+
+		it('should handle zero or negative threshold', () => {
+			const text = 'a'.repeat(100);
+			expect(hasLongSequentialRepeat(text, 0)).toBe(false);
+			expect(hasLongSequentialRepeat(text, -1)).toBe(false);
+		});
+
+		it('should handle empty string', () => {
+			expect(hasLongSequentialRepeat('', 100)).toBe(false);
+		});
+	});
+});

@@ -26,3 +26,50 @@ export function getMetadataFiltersValues(
 
 	return undefined;
 }
+
+/**
+ * Detects if a text contains a character that repeats sequentially for a specified threshold.
+ * This is used to prevent performance issues with tiktoken on highly repetitive content.
+ * @param text The text to check
+ * @param threshold The minimum number of sequential repeats to detect (default: 1000)
+ * @returns true if a character repeats sequentially for at least the threshold amount
+ */
+export function hasLongSequentialRepeat(text: string, threshold = 1000): boolean {
+	try {
+		// Validate inputs
+		if (
+			text === null ||
+			typeof text !== 'string' ||
+			text.length === 0 ||
+			threshold <= 0 ||
+			text.length < threshold
+		) {
+			return false;
+		}
+		// Use string iterator to avoid creating array copy (memory efficient)
+		const iterator = text[Symbol.iterator]();
+		let prev = iterator.next();
+
+		if (prev.done) {
+			return false;
+		}
+
+		let count = 1;
+		for (const char of iterator) {
+			if (char === prev.value) {
+				count++;
+				if (count >= threshold) {
+					return true;
+				}
+			} else {
+				count = 1;
+				prev = { value: char, done: false };
+			}
+		}
+
+		return false;
+	} catch (error) {
+		// On any error, return false to allow normal processing
+		return false;
+	}
+}
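
`hasLongSequentialRepeat` exists because tiktoken's tokenization degrades badly on long single-character runs. A sketch of the intended guard; the composition below is assumed rather than shown in this diff, and `estimateTokensByCharCount`'s exact signature is an assumption:

import {
	encodingForModel,
	estimateTokensByCharCount,
	hasLongSequentialRepeat,
} from '@n8n/ai-utilities';

// Assumed guard pattern: skip exact tokenization on degenerate input.
function countTokens(text: string): number {
	if (hasLongSequentialRepeat(text)) {
		// Cheap heuristic instead of tiktoken's pathological worst case.
		return estimateTokensByCharCount(text);
	}
	return encodingForModel('gpt-4o').encode(text).length;
}
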

@@ -1,6 +1,6 @@
 import { Agent, ProxyAgent } from 'undici';
 
-import { getProxyAgent, proxyFetch } from '../httpProxyAgent';
+import { getProxyAgent, proxyFetch } from './http-proxy-agent';
 
 // Mock the dependencies
 jest.mock('undici', () => ({

@@ -12,8 +12,8 @@ import pick from 'lodash/pick';
 import type { IDataObject, ISupplyDataFunctions, JsonObject } from 'n8n-workflow';
 import { NodeConnectionTypes, NodeError, NodeOperationError } from 'n8n-workflow';
 
-import { logAiEvent } from '@n8n/ai-utilities';
-import { estimateTokensFromStringList } from '@utils/tokenizer/token-estimator';
+import { logAiEvent } from './log-ai-event';
+import { estimateTokensFromStringList } from './tokenizer/token-estimator';
 
 type TokensUsageParser = (result: LLMResult) => {
 	completionTokens: number;

@@ -192,6 +192,7 @@ export class N8nLlmTracing extends BaseCallbackHandler {
 		const runDetails = this.runsMap[runId] ?? { index: Object.keys(this.runsMap).length };
 
 		// Filter out non-x- headers to avoid leaking sensitive information in logs
+		// eslint-disable-next-line no-prototype-builtins
 		if (typeof error === 'object' && error?.hasOwnProperty('headers')) {
 			const errorWithHeaders = error as { headers: Record<string, unknown> };
 

@@ -220,6 +221,7 @@ export class N8nLlmTracing extends BaseCallbackHandler {
 		}
 
 		logAiEvent(this.executionFunctions, 'ai-llm-errored', {
+			// eslint-disable-next-line @typescript-eslint/no-base-to-string
 			error: Object.keys(error).length === 0 ? error.toString() : error,
 			runId,
 			parentRunId,

@@ -6,7 +6,7 @@
 import type { TiktokenEncoding } from 'js-tiktoken/lite';
 import { Tiktoken } from 'js-tiktoken/lite';
 
-import { getEncoding, encodingForModel } from '../tokenizer/tiktoken';
+import { getEncoding, encodingForModel } from '../tiktoken';
 
 jest.mock('js-tiktoken/lite', () => ({
 	Tiktoken: jest.fn(),

@@ -39,6 +39,7 @@ describe('tiktoken utils', () => {
 			throw new Error(`Unexpected file path: ${path}`);
 		});
 
+		// eslint-disable-next-line n8n-local-rules/no-uncaught-json-parse
 		mockJsonParse.mockImplementation((content: string) => JSON.parse(content));
 	});
 

@@ -14,7 +14,7 @@ import {
 } from 'n8n-workflow';
 
 import { numberInputsProperty, configuredInputs } from './helpers';
-import { N8nLlmTracing } from '../llms/N8nLlmTracing';
+import { N8nLlmTracing } from '@n8n/ai-utilities';
 import { N8nNonEstimatingTracing } from '../llms/N8nNonEstimatingTracing';
 
 interface ModeleSelectionRule {

@@ -6,7 +6,7 @@ import { NodeOperationError, NodeConnectionTypes } from 'n8n-workflow';
 import { ModelSelector } from '../ModelSelector.node';
 
 // Mock the N8nLlmTracing module completely to avoid module resolution issues
-jest.mock('../../llms/N8nLlmTracing', () => ({
+jest.mock('@n8n/ai-utilities', () => ({
 	N8nLlmTracing: jest.fn().mockImplementation(() => ({
 		handleLLMStart: jest.fn(),
 		handleLLMEnd: jest.fn(),

@@ -1,5 +1,5 @@
-import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { BaseChatMemory } from '@langchain/classic/memory';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import { NodeOperationError } from 'n8n-workflow';
 import type { IExecuteFunctions, ISupplyDataFunctions, INodeExecutionData } from 'n8n-workflow';
 import assert from 'node:assert';

@@ -2,8 +2,7 @@ import type { BedrockRuntimeClientConfig } from '@aws-sdk/client-bedrock-runtime
 import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
 import { BedrockEmbeddings } from '@langchain/aws';
 import { NodeHttpHandler } from '@smithy/node-http-handler';
-import { getNodeProxyAgent } from '@utils/httpProxyAgent';
-import { logWrapper } from '@n8n/ai-utilities';
+import { getNodeProxyAgent, logWrapper } from '@n8n/ai-utilities';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 import {
 	NodeConnectionTypes,

@@ -1,4 +1,5 @@
 import { AzureOpenAIEmbeddings } from '@langchain/openai';
+import { getProxyAgent, logWrapper } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,

@@ -7,8 +8,6 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
-import { getProxyAgent } from '@utils/httpProxyAgent';
-import { logWrapper } from '@n8n/ai-utilities';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 export class EmbeddingsAzureOpenAi implements INodeType {

@@ -12,8 +12,7 @@ import {
 import type { ClientOptions } from 'openai';
 
 import { checkDomainRestrictions } from '@utils/checkDomainRestrictions';
-import { getProxyAgent } from '@utils/httpProxyAgent';
-import { logWrapper } from '@n8n/ai-utilities';
+import { getProxyAgent, logWrapper } from '@n8n/ai-utilities';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 const modelParameter: INodeProperties = {

@@ -10,7 +10,8 @@ jest.mock('@langchain/openai');
 
 class MockProxyAgent {}
 
-jest.mock('@utils/httpProxyAgent', () => ({
+jest.mock('@n8n/ai-utilities', () => ({
+	logWrapper: jest.fn().mockImplementation(() => jest.fn()),
 	getProxyAgent: jest.fn().mockImplementation(() => new MockProxyAgent()),
 }));
 

@@ -1,19 +1,18 @@
 import { ChatAnthropic } from '@langchain/anthropic';
 import type { LLMResult } from '@langchain/core/outputs';
-import { getProxyAgent } from '@utils/httpProxyAgent';
-import { getConnectionHintNoticeField } from '@utils/sharedFields';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
-	type INodePropertyOptions,
 	type INodeProperties,
+	type INodePropertyOptions,
 	type INodeType,
 	type INodeTypeDescription,
-	type ISupplyDataFunctions,
+	type ISupplyDataFunctions,
 	type SupplyData,
 } from 'n8n-workflow';
 
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { searchModels } from './methods/searchModels';
 
 const modelField: INodeProperties = {

@@ -2,23 +2,19 @@
 /* eslint-disable @typescript-eslint/unbound-method */
 /* eslint-disable @typescript-eslint/no-unsafe-assignment */
 import { ChatAnthropic } from '@langchain/anthropic';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, getProxyAgent } from '@n8n/ai-utilities';
 import { createMockExecuteFunction } from 'n8n-nodes-base/test/nodes/Helpers';
 import type { INode, ISupplyDataFunctions } from 'n8n-workflow';
 
-import { makeN8nLlmFailedAttemptHandler } from '../../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../../N8nLlmTracing';
 import { LmChatAnthropic } from '../LmChatAnthropic.node';
 
 jest.mock('@langchain/anthropic');
-jest.mock('../../N8nLlmTracing');
-jest.mock('../../n8nLlmFailedAttemptHandler');
-jest.mock('@utils/httpProxyAgent', () => ({
-	getProxyAgent: jest.fn().mockReturnValue({}),
-}));
+jest.mock('@n8n/ai-utilities');
 
 const MockedChatAnthropic = jest.mocked(ChatAnthropic);
 const MockedN8nLlmTracing = jest.mocked(N8nLlmTracing);
 const mockedMakeN8nLlmFailedAttemptHandler = jest.mocked(makeN8nLlmFailedAttemptHandler);
+const mockedGetProxyAgent = jest.mocked(getProxyAgent);
 
 describe('LmChatAnthropic', () => {
 	let lmChatAnthropic: LmChatAnthropic;

@@ -50,7 +46,7 @@ describe('LmChatAnthropic', () => {
 		// Mock the constructors/functions properly
 		MockedN8nLlmTracing.mockImplementation(() => ({}) as N8nLlmTracing);
 		mockedMakeN8nLlmFailedAttemptHandler.mockReturnValue(jest.fn());
+		mockedGetProxyAgent.mockReturnValue({} as any);
 		return mockContext;
 	};
 

@@ -1,4 +1,5 @@
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,

@@ -10,11 +11,8 @@ import {
 import type { LemonadeApiCredentialsType } from '../../../credentials/LemonadeApi.credentials';
 
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
-import { getProxyAgent } from '@utils/httpProxyAgent';
 
 import { lemonadeModel, lemonadeOptions, lemonadeDescription } from '../LMLemonade/description';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatLemonade implements INodeType {
 	description: INodeTypeDescription = {

@@ -1,5 +1,6 @@
 import type { ChatOllamaInput } from '@langchain/ollama';
 import { ChatOllama } from '@langchain/ollama';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, proxyFetch } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,

@@ -9,11 +10,8 @@ import {
 } from 'n8n-workflow';
 
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
-import { proxyFetch } from '@utils/httpProxyAgent';
 
 import { ollamaModel, ollamaOptions, ollamaDescription } from '../LMOllama/description';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatOllama implements INodeType {
 	description: INodeTypeDescription = {

@@ -11,12 +11,10 @@ import {
 } from 'n8n-workflow';
 
 import { checkDomainRestrictions } from '@utils/checkDomainRestrictions';
-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, getProxyAgent } from '@n8n/ai-utilities';
 import { formatBuiltInTools, prepareAdditionalResponsesParams } from './common';
 import { searchModels } from './methods/loadModels';
 import type { ModelOptions } from './types';

@@ -2,7 +2,7 @@ import type { ILoadOptionsFunctions, INodeListSearchResult } from 'n8n-workflow'
 import OpenAI from 'openai';
 
 import { shouldIncludeModel } from '../../../vendors/OpenAi/helpers/modelFiltering';
-import { getProxyAgent } from '@utils/httpProxyAgent';
+import { getProxyAgent } from '@n8n/ai-utilities';
 import { Container } from '@n8n/di';
 import { AiConfig } from '@n8n/config';
 

@@ -9,8 +9,7 @@ import {
 
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 
 export class LmCohere implements INodeType {
 	description: INodeTypeDescription = {

@@ -12,8 +12,7 @@ import type { LemonadeApiCredentialsType } from '../../../credentials/LemonadeAp
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { lemonadeDescription, lemonadeModel, lemonadeOptions } from './description';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 
 export class LmLemonade implements INodeType {
 	description: INodeTypeDescription = {

@@ -10,8 +10,7 @@ import {
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import { ollamaDescription, ollamaModel, ollamaOptions } from './description';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 
 export class LmOllama implements INodeType {
 	description: INodeTypeDescription = {

@@ -1,4 +1,5 @@
 import { OpenAI, type ClientOptions } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import { NodeConnectionTypes } from 'n8n-workflow';
 import type {
 	INodeType,

@@ -8,13 +9,9 @@ import type {
 	ILoadOptionsFunctions,
 } from 'n8n-workflow';
 
-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { Container } from '@n8n/di';
 import { AiConfig } from '@n8n/config';
 
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
-
 type LmOpenAiOptions = {
 	baseURL?: string;
 	frequencyPenalty?: number;

@@ -9,8 +9,7 @@ import {
 
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 
 export class LmOpenHuggingFaceInference implements INodeType {
 	description: INodeTypeDescription = {

@@ -1,8 +1,12 @@
 import type { BedrockRuntimeClientConfig } from '@aws-sdk/client-bedrock-runtime';
 import { BedrockRuntimeClient } from '@aws-sdk/client-bedrock-runtime';
 import { ChatBedrockConverse } from '@langchain/aws';
+import {
+	getNodeProxyAgent,
+	makeN8nLlmFailedAttemptHandler,
+	N8nLlmTracing,
+} from '@n8n/ai-utilities';
 import { NodeHttpHandler } from '@smithy/node-http-handler';
-import { getNodeProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 import {
 	NodeConnectionTypes,

@@ -12,9 +16,6 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
-
 export class LmChatAwsBedrock implements INodeType {
 	description: INodeTypeDescription = {
 		displayName: 'AWS Bedrock Chat Model',

@@ -1,4 +1,5 @@
 import { AzureChatOpenAI } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeOperationError,
 	NodeConnectionTypes,

@@ -8,8 +9,6 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
-import { getProxyAgent } from '@utils/httpProxyAgent';
-
 import { setupApiKeyAuthentication } from './credentials/api-key';
 import { setupOAuth2Authentication } from './credentials/oauth2';
 import { properties } from './properties';

@@ -19,8 +18,6 @@ import type {
 	AzureOpenAIOAuth2ModelConfig,
 	AzureOpenAIOptions,
 } from './types';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatAzureOpenAi implements INodeType {
 	description: INodeTypeDescription = {

@@ -9,8 +9,7 @@ import type {
 
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 
 export function tokensUsageParser(result: LLMResult): {
 	completionTokens: number;

@@ -1,4 +1,5 @@
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,

@@ -7,13 +8,10 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';
 
-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';
 
 import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
 
 export class LmChatDeepSeek implements INodeType {
 	description: INodeTypeDescription = {
@@ -12,8 +12,7 @@ import type {
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

 import { getAdditionalOptions } from '../gemini-common/additional-options';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';

 function errorDescriptionMapper(error: NodeError) {
 	if (error.description?.includes('properties: should be non-empty for OBJECT type')) {

@@ -18,8 +18,7 @@ import { getConnectionHintNoticeField } from '@utils/sharedFields';

 import { makeErrorFromStatus } from './error-handling';
 import { getAdditionalOptions } from '../gemini-common/additional-options';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';

 export class LmChatGoogleVertex implements INodeType {
 	description: INodeTypeDescription = {
@@ -1,14 +1,12 @@
 import { ChatVertexAI } from '@langchain/google-vertexai';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import { createMockExecuteFunction } from 'n8n-nodes-base/test/nodes/Helpers';
 import type { INode, ISupplyDataFunctions } from 'n8n-workflow';

-import { makeN8nLlmFailedAttemptHandler } from '../../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../../N8nLlmTracing';
 import { LmChatGoogleVertex } from '../LmChatGoogleVertex.node';

 jest.mock('@langchain/google-vertexai');
-jest.mock('../../N8nLlmTracing');
-jest.mock('../../n8nLlmFailedAttemptHandler');
+jest.mock('@n8n/ai-utilities');
 jest.mock('n8n-nodes-base/dist/utils/utilities', () => ({
 	formatPrivateKey: jest.fn().mockImplementation((key: string) => key),
 }));
@@ -1,4 +1,5 @@
 import { ChatGroq } from '@langchain/groq';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,
@@ -7,12 +8,8 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';

-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';
-
 export class LmChatGroq implements INodeType {
 	description: INodeTypeDescription = {
 		displayName: 'Groq Chat Model',
@@ -1,6 +1,7 @@
 import type { ChatMistralAIInput } from '@langchain/mistralai';
 import { ChatMistralAI } from '@langchain/mistralai';
 import { HTTPClient } from '@mistralai/mistralai/lib/http.js';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, proxyFetch } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,
@@ -10,10 +11,6 @@ import {
 } from 'n8n-workflow';

 import { getConnectionHintNoticeField } from '@utils/sharedFields';
-import { proxyFetch } from '@utils/httpProxyAgent';
-
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';

 const deprecatedMagistralModelsWithTextOutput = ['magistral-small-2506', 'magistral-medium-2506'];
@@ -1,4 +1,5 @@
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,
@@ -7,13 +8,10 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';

-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

 import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';

 export class LmChatOpenRouter implements INodeType {
 	description: INodeTypeDescription = {
@@ -1,4 +1,5 @@
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,
@@ -7,13 +8,10 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';

-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

 import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';

 export class LmChatVercelAiGateway implements INodeType {
 	description: INodeTypeDescription = {
@@ -1,4 +1,5 @@
 import { ChatOpenAI, type ClientOptions } from '@langchain/openai';
+import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';
 import {
 	NodeConnectionTypes,
 	type INodeType,
@@ -7,13 +8,10 @@ import {
 	type SupplyData,
 } from 'n8n-workflow';

-import { getProxyAgent } from '@utils/httpProxyAgent';
 import { getConnectionHintNoticeField } from '@utils/sharedFields';

 import type { OpenAICompatibleCredential } from '../../../types/types';
 import { openAiFailedAttemptHandler } from '../../vendors/OpenAi/helpers/error-handling';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';

 export class LmChatXAiGrok implements INodeType {
 	description: INodeTypeDescription = {
@@ -1,23 +1,19 @@
 /* eslint-disable n8n-nodes-base/node-filename-against-convention */
 /* eslint-disable @typescript-eslint/unbound-method */
 import { ChatAnthropic } from '@langchain/anthropic';
+import { N8nLlmTracing, makeN8nLlmFailedAttemptHandler, getProxyAgent } from '@n8n/ai-utilities';
 import { createMockExecuteFunction } from 'n8n-nodes-base/test/nodes/Helpers';
 import type { ILoadOptionsFunctions, INode, ISupplyDataFunctions } from 'n8n-workflow';

 import { LmChatAnthropic } from '../LMChatAnthropic/LmChatAnthropic.node';
-import { N8nLlmTracing } from '../N8nLlmTracing';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';

 jest.mock('@langchain/anthropic');
-jest.mock('../N8nLlmTracing');
-jest.mock('../n8nLlmFailedAttemptHandler');
-jest.mock('@utils/httpProxyAgent', () => ({
-	getProxyAgent: jest.fn().mockReturnValue({}),
-}));
+jest.mock('@n8n/ai-utilities');

 const MockedChatAnthropic = jest.mocked(ChatAnthropic);
 const MockedN8nLlmTracing = jest.mocked(N8nLlmTracing);
 const mockedMakeN8nLlmFailedAttemptHandler = jest.mocked(makeN8nLlmFailedAttemptHandler);
+const mockedGetProxyAgent = jest.mocked(getProxyAgent);

 describe('LmChatAnthropic', () => {
 	let lmChatAnthropic: LmChatAnthropic;

@@ -49,7 +45,7 @@ describe('LmChatAnthropic', () => {
 		// Mock the constructors/functions properly
 		MockedN8nLlmTracing.mockImplementation(() => ({}) as any);
 		mockedMakeN8nLlmFailedAttemptHandler.mockReturnValue(jest.fn());
-
+		mockedGetProxyAgent.mockReturnValue({} as any);
 		return mockContext;
 	};
@@ -1,6 +1,7 @@
 /* eslint-disable n8n-nodes-base/node-filename-against-convention */
 /* eslint-disable @typescript-eslint/unbound-method */
 import { ChatOpenAI } from '@langchain/openai';
+import { makeN8nLlmFailedAttemptHandler, N8nLlmTracing, getProxyAgent } from '@n8n/ai-utilities';
 import { AiConfig } from '@n8n/config';
 import { Container } from '@n8n/di';
 import { createMockExecuteFunction } from 'n8n-nodes-base/test/nodes/Helpers';
@@ -8,21 +9,16 @@ import type { IDataObject, INode, ISupplyDataFunctions } from 'n8n-workflow';

 import * as common from '../LMChatOpenAi/common';
 import { LmChatOpenAi } from '../LMChatOpenAi/LmChatOpenAi.node';
-import { makeN8nLlmFailedAttemptHandler } from '../n8nLlmFailedAttemptHandler';
-import { N8nLlmTracing } from '../N8nLlmTracing';

 jest.mock('@langchain/openai');
-jest.mock('../N8nLlmTracing');
-jest.mock('../n8nLlmFailedAttemptHandler');
+jest.mock('@n8n/ai-utilities');
 jest.mock('../LMChatOpenAi/common');
-jest.mock('@utils/httpProxyAgent', () => ({
-	getProxyAgent: jest.fn().mockReturnValue({}),
-}));

 const MockedChatOpenAI = jest.mocked(ChatOpenAI);
 const MockedN8nLlmTracing = jest.mocked(N8nLlmTracing);
 const mockedMakeN8nLlmFailedAttemptHandler = jest.mocked(makeN8nLlmFailedAttemptHandler);
 const mockedCommon = jest.mocked(common);
+const mockedGetProxyAgent = jest.mocked(getProxyAgent);
 const { openAiDefaultHeaders: defaultHeaders } = Container.get(AiConfig);

 describe('LmChatOpenAi', () => {

@@ -55,7 +51,7 @@ describe('LmChatOpenAi', () => {
 		// Mock the constructors/functions properly
 		MockedN8nLlmTracing.mockImplementation(() => ({}) as any);
 		mockedMakeN8nLlmFailedAttemptHandler.mockReturnValue(jest.fn());
-
+		mockedGetProxyAgent.mockReturnValue({} as any);
 		return mockContext;
 	};
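Both test suites above land on the same recipe: a single `jest.mock('@n8n/ai-utilities')` replaces the three per-module mocks, and `getProxyAgent`, previously mocked via an inline factory, becomes a plain `jest.mocked` handle stubbed alongside the others. Condensed into a standalone sketch (assembled from the hunks above, not a verbatim file):

```typescript
import { getProxyAgent, makeN8nLlmFailedAttemptHandler, N8nLlmTracing } from '@n8n/ai-utilities';

// A single automock now covers tracing, the failed-attempt handler, and the proxy agent.
jest.mock('@n8n/ai-utilities');

const MockedN8nLlmTracing = jest.mocked(N8nLlmTracing);
const mockedMakeN8nLlmFailedAttemptHandler = jest.mocked(makeN8nLlmFailedAttemptHandler);
const mockedGetProxyAgent = jest.mocked(getProxyAgent);

beforeEach(() => {
	// The same stubs the suites install in their setup helpers.
	MockedN8nLlmTracing.mockImplementation(() => ({}) as any);
	mockedMakeN8nLlmFailedAttemptHandler.mockReturnValue(jest.fn());
	mockedGetProxyAgent.mockReturnValue({} as any);
});
```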
@@ -8,7 +8,7 @@ import { mock } from 'jest-mock-extended';
 import type { IDataObject, ISupplyDataFunctions } from 'n8n-workflow';
 import { NodeOperationError, NodeApiError } from 'n8n-workflow';

-import { N8nLlmTracing } from '../N8nLlmTracing';
+import { N8nLlmTracing } from '@n8n/ai-utilities';

 describe('N8nLlmTracing', () => {
 	const executionFunctions = mock<ISupplyDataFunctions>({
@@ -11,7 +11,7 @@ import type {
 } from 'n8n-workflow';
 import { createResultError, createResultOk, NodeOperationError } from 'n8n-workflow';

-import { proxyFetch } from '@utils/httpProxyAgent';
+import { proxyFetch } from '@n8n/ai-utilities';

 import type { McpAuthenticationOption, McpServerTransport, McpTool } from './types';
@@ -1,8 +1,10 @@
 import type { TokenTextSplitterParams } from '@langchain/textsplitters';
 import { TextSplitter } from '@langchain/textsplitters';
-import { hasLongSequentialRepeat } from '@utils/helpers';
-import { getEncoding } from '@utils/tokenizer/tiktoken';
-import { estimateTextSplitsByTokens } from '@utils/tokenizer/token-estimator';
+import {
+	hasLongSequentialRepeat,
+	getEncoding,
+	estimateTextSplitsByTokens,
+} from '@n8n/ai-utilities';
 import type * as tiktoken from 'js-tiktoken';

 /**
@@ -1,13 +1,9 @@
+import * as aiUtilities from '@n8n/ai-utilities';
 import { OperationalError } from 'n8n-workflow';

-import * as helpers from '../../../../utils/helpers';
-import * as tiktokenUtils from '../../../../utils/tokenizer/tiktoken';
-import * as tokenEstimator from '../../../../utils/tokenizer/token-estimator';
 import { TokenTextSplitter } from '../TokenTextSplitter';

-jest.mock('../../../../utils/tokenizer/tiktoken');
-jest.mock('../../../../utils/helpers');
-jest.mock('../../../../utils/tokenizer/token-estimator');
+jest.mock('@n8n/ai-utilities');

 describe('TokenTextSplitter', () => {
 	let mockTokenizer: jest.Mocked<{

@@ -20,9 +16,9 @@ describe('TokenTextSplitter', () => {
 			encode: jest.fn(),
 			decode: jest.fn(),
 		};
-		(tiktokenUtils.getEncoding as jest.Mock).mockReturnValue(mockTokenizer);
+		(aiUtilities.getEncoding as jest.Mock).mockReturnValue(mockTokenizer);
 		// Default mock for hasLongSequentialRepeat - no repetition
-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
 	});

 	afterEach(() => {

@@ -85,7 +81,7 @@ describe('TokenTextSplitter', () => {

 		const result = await splitter.splitText(inputText);

-		expect(tiktokenUtils.getEncoding).toHaveBeenCalledWith('cl100k_base');
+		expect(aiUtilities.getEncoding).toHaveBeenCalledWith('cl100k_base');
 		expect(mockTokenizer.encode).toHaveBeenCalledWith(inputText, [], 'all');
 		expect(result).toEqual(['Hello world,', ' this is', ' a test']);
 	});

@@ -129,7 +125,7 @@ describe('TokenTextSplitter', () => {

 		await splitter.splitText(inputText);

-		expect(tiktokenUtils.getEncoding).toHaveBeenCalledWith('o200k_base');
+		expect(aiUtilities.getEncoding).toHaveBeenCalledWith('o200k_base');
 		expect(mockTokenizer.encode).toHaveBeenCalledWith(inputText, ['<|special|>'], ['<|bad|>']);
 	});

@@ -141,7 +137,7 @@ describe('TokenTextSplitter', () => {
 		await splitter.splitText('first call');
 		await splitter.splitText('second call');

-		expect(tiktokenUtils.getEncoding).toHaveBeenCalledTimes(1);
+		expect(aiUtilities.getEncoding).toHaveBeenCalledTimes(1);
 	});

 	it('should handle large text with multiple chunks and overlap', async () => {

@@ -180,18 +176,18 @@ describe('TokenTextSplitter', () => {
 		const repetitiveText = 'a'.repeat(1000);
 		const estimatedChunks = ['chunk1', 'chunk2', 'chunk3'];

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
-		(tokenEstimator.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(estimatedChunks);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
+		(aiUtilities.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(estimatedChunks);

 		const result = await splitter.splitText(repetitiveText);

 		// Should not call tiktoken
-		expect(tiktokenUtils.getEncoding).not.toHaveBeenCalled();
+		expect(aiUtilities.getEncoding).not.toHaveBeenCalled();
 		expect(mockTokenizer.encode).not.toHaveBeenCalled();

 		// Should use estimation
-		expect(helpers.hasLongSequentialRepeat).toHaveBeenCalledWith(repetitiveText);
-		expect(tokenEstimator.estimateTextSplitsByTokens).toHaveBeenCalledWith(
+		expect(aiUtilities.hasLongSequentialRepeat).toHaveBeenCalledWith(repetitiveText);
+		expect(aiUtilities.estimateTextSplitsByTokens).toHaveBeenCalledWith(
 			repetitiveText,
 			100,
 			10,

@@ -210,21 +206,21 @@ describe('TokenTextSplitter', () => {
 		const normalText = 'This is normal text without repetition';
 		const mockTokenIds = [1, 2, 3, 4, 5, 6];

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
 		mockTokenizer.encode.mockReturnValue(mockTokenIds);
 		mockTokenizer.decode.mockImplementation(() => 'chunk');

 		await splitter.splitText(normalText);

 		// Should check for repetition
-		expect(helpers.hasLongSequentialRepeat).toHaveBeenCalledWith(normalText);
+		expect(aiUtilities.hasLongSequentialRepeat).toHaveBeenCalledWith(normalText);

 		// Should use tiktoken
-		expect(tiktokenUtils.getEncoding).toHaveBeenCalled();
+		expect(aiUtilities.getEncoding).toHaveBeenCalled();
 		expect(mockTokenizer.encode).toHaveBeenCalled();

 		// Should not use estimation
-		expect(tokenEstimator.estimateTextSplitsByTokens).not.toHaveBeenCalled();
+		expect(aiUtilities.estimateTextSplitsByTokens).not.toHaveBeenCalled();
 	});

 	it('should handle repetitive content with different encodings', async () => {

@@ -237,12 +233,12 @@ describe('TokenTextSplitter', () => {
 		const repetitiveText = '.'.repeat(500);
 		const estimatedChunks = ['estimated chunk 1', 'estimated chunk 2'];

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
-		(tokenEstimator.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(estimatedChunks);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
+		(aiUtilities.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(estimatedChunks);

 		const result = await splitter.splitText(repetitiveText);

-		expect(tokenEstimator.estimateTextSplitsByTokens).toHaveBeenCalledWith(
+		expect(aiUtilities.estimateTextSplitsByTokens).toHaveBeenCalledWith(
 			repetitiveText,
 			50,
 			5,

@@ -255,12 +251,12 @@ describe('TokenTextSplitter', () => {
 		const splitter = new TokenTextSplitter();
 		const edgeText = 'x'.repeat(100);

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
-		(tokenEstimator.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(['single chunk']);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
+		(aiUtilities.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(['single chunk']);

 		const result = await splitter.splitText(edgeText);

-		expect(helpers.hasLongSequentialRepeat).toHaveBeenCalledWith(edgeText);
+		expect(aiUtilities.hasLongSequentialRepeat).toHaveBeenCalledWith(edgeText);
 		expect(result).toEqual(['single chunk']);
 	});

@@ -268,16 +264,13 @@ describe('TokenTextSplitter', () => {
 		const splitter = new TokenTextSplitter();
 		const mixedText = 'Normal text ' + 'z'.repeat(200) + ' more normal text';

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
-		(tokenEstimator.estimateTextSplitsByTokens as jest.Mock).mockReturnValue([
-			'chunk1',
-			'chunk2',
-		]);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(true);
+		(aiUtilities.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(['chunk1', 'chunk2']);

 		const result = await splitter.splitText(mixedText);

-		expect(helpers.hasLongSequentialRepeat).toHaveBeenCalledWith(mixedText);
-		expect(tokenEstimator.estimateTextSplitsByTokens).toHaveBeenCalled();
+		expect(aiUtilities.hasLongSequentialRepeat).toHaveBeenCalledWith(mixedText);
+		expect(aiUtilities.estimateTextSplitsByTokens).toHaveBeenCalled();
 		expect(result).toEqual(['chunk1', 'chunk2']);
 	});
 });

@@ -305,18 +298,16 @@ describe('TokenTextSplitter', () => {
 		const splitter = new TokenTextSplitter();
 		const text = 'This will cause tiktoken to fail';

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
-		(tiktokenUtils.getEncoding as jest.Mock).mockImplementation(() => {
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
+		(aiUtilities.getEncoding as jest.Mock).mockImplementation(() => {
 			throw new Error('Tiktoken error');
 		});
-		(tokenEstimator.estimateTextSplitsByTokens as jest.Mock).mockReturnValue([
-			'fallback chunk',
-		]);
+		(aiUtilities.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(['fallback chunk']);

 		const result = await splitter.splitText(text);

 		expect(result).toEqual(['fallback chunk']);
-		expect(tokenEstimator.estimateTextSplitsByTokens).toHaveBeenCalledWith(
+		expect(aiUtilities.estimateTextSplitsByTokens).toHaveBeenCalledWith(
 			text,
 			splitter.chunkSize,
 			splitter.chunkOverlap,

@@ -328,13 +319,11 @@ describe('TokenTextSplitter', () => {
 		const splitter = new TokenTextSplitter();
 		const text = 'This will cause encode to fail';

-		(helpers.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
+		(aiUtilities.hasLongSequentialRepeat as jest.Mock).mockReturnValue(false);
 		mockTokenizer.encode.mockImplementation(() => {
 			throw new OperationalError('Encode error');
 		});
-		(tokenEstimator.estimateTextSplitsByTokens as jest.Mock).mockReturnValue([
-			'fallback chunk',
-		]);
+		(aiUtilities.estimateTextSplitsByTokens as jest.Mock).mockReturnValue(['fallback chunk']);

 		const result = await splitter.splitText(text);
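Taken together, the renamed assertions still pin down the splitter's three-way strategy: repetitive input short-circuits to token estimation, ordinary input is tokenized with tiktoken, and any tokenizer failure also falls back to estimation. A rough sketch of that control flow, reconstructed from the assertions above rather than from the actual `TokenTextSplitter` source:

```typescript
import { estimateTextSplitsByTokens, getEncoding, hasLongSequentialRepeat } from '@n8n/ai-utilities';

// Sketch of the behaviour the tests pin down; assumes chunkSize > chunkOverlap.
function splitTextSketch(text: string, chunkSize: number, chunkOverlap: number): string[] {
	// Highly repetitive content is pathological for tiktoken, so estimate instead.
	if (hasLongSequentialRepeat(text)) {
		return estimateTextSplitsByTokens(text, chunkSize, chunkOverlap);
	}
	try {
		const tokenizer = getEncoding('cl100k_base');
		const ids = tokenizer.encode(text, [], 'all');
		const chunks: string[] = [];
		for (let start = 0; start < ids.length; start += chunkSize - chunkOverlap) {
			chunks.push(tokenizer.decode(ids.slice(start, start + chunkSize)));
		}
		return chunks;
	} catch {
		// Tokenizer errors fall back to estimation too (see the failure tests above).
		return estimateTextSplitsByTokens(text, chunkSize, chunkOverlap);
	}
}
```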
@@ -24,7 +24,7 @@ import { getTracingConfig } from '@utils/tracing';

 import { formatToOpenAIAssistantTool, getChatMessages } from '../../../helpers/utils';
 import { assistantRLC } from '../descriptions';
-import { getProxyAgent } from '@utils/httpProxyAgent';
+import { getProxyAgent } from '@n8n/ai-utilities';
 import { Container } from '@n8n/di';
 import { AiConfig } from '@n8n/config';
 import { checkDomainRestrictions } from '@utils/checkDomainRestrictions';
@@ -25,8 +25,7 @@
 		"dev": "pnpm run watch",
 		"typecheck": "tsc --noEmit",
 		"copy-nodes-json": "node ../../nodes-base/scripts/copy-nodes-json.js .",
-		"copy-tokenizer-json": "node scripts/copy-tokenizer-json.js .",
-		"build": "tsc --build tsconfig.build.json && pnpm copy-nodes-json && tsc-alias -p tsconfig.build.json && pnpm copy-tokenizer-json && pnpm n8n-copy-static-files && pnpm n8n-generate-metadata",
+		"build": "tsc --build tsconfig.build.json && pnpm copy-nodes-json && tsc-alias -p tsconfig.build.json && pnpm n8n-copy-static-files && pnpm n8n-generate-metadata",
 		"format": "biome format --write .",
 		"format:check": "biome ci .",
 		"lint": "eslint nodes credentials utils --quiet",
@@ -256,9 +255,8 @@
 		"form-data": "catalog:",
 		"generate-schema": "2.6.0",
 		"html-to-text": "9.0.5",
-		"https-proxy-agent": "catalog:",
 		"ignore": "^5.2.0",
-		"js-tiktoken": "^1.0.12",
+		"js-tiktoken": "catalog:",
 		"jsdom": "23.0.1",
 		"langchain": "catalog:",
 		"@langchain/classic": "1.0.5",
@@ -271,13 +269,11 @@
 		"openai": "^6.9.0",
 		"pdf-parse": "1.1.1",
 		"pg": "catalog:",
-		"proxy-from-env": "^1.1.0",
 		"redis": "4.6.14",
 		"sanitize-html": "2.12.1",
 		"sqlite3": "5.1.7",
 		"temp": "0.9.4",
 		"tmp-promise": "3.0.3",
-		"undici": "^6.21.0",
 		"weaviate-client": "3.9.0",
 		"zod": "catalog:",
 		"zod-to-json-schema": "3.23.3"
@@ -17,7 +17,6 @@ function runCommand(command) {

 // Run all post-build tasks
 runCommand('npx tsc-alias -p tsconfig.build.json');
-runCommand('node scripts/copy-tokenizer-json.js .');
 runCommand('node ../../nodes-base/scripts/copy-nodes-json.js .');
 runCommand('pnpm n8n-copy-static-files');
 runCommand('pnpm n8n-generate-metadata');
@@ -238,50 +238,3 @@ export function unwrapNestedOutput(output: Record<string, unknown>): Record<stri

 	return output;
 }
-
-/**
- * Detects if a text contains a character that repeats sequentially for a specified threshold.
- * This is used to prevent performance issues with tiktoken on highly repetitive content.
- * @param text The text to check
- * @param threshold The minimum number of sequential repeats to detect (default: 1000)
- * @returns true if a character repeats sequentially for at least the threshold amount
- */
-export function hasLongSequentialRepeat(text: string, threshold = 1000): boolean {
-	try {
-		// Validate inputs
-		if (
-			text === null ||
-			typeof text !== 'string' ||
-			text.length === 0 ||
-			threshold <= 0 ||
-			text.length < threshold
-		) {
-			return false;
-		}
-		// Use string iterator to avoid creating array copy (memory efficient)
-		const iterator = text[Symbol.iterator]();
-		let prev = iterator.next();
-
-		if (prev.done) {
-			return false;
-		}
-
-		let count = 1;
-		for (const char of iterator) {
-			if (char === prev.value) {
-				count++;
-				if (count >= threshold) {
-					return true;
-				}
-			} else {
-				count = 1;
-				prev = { value: char, done: false };
-			}
-		}
-
-		return false;
-	} catch (error) {
-		// On any error, return false to allow normal processing
-		return false;
-	}
-}
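The helper is deleted here because it now ships from `@n8n/ai-utilities`; the `TokenTextSplitter` hunk above already imports it from there. Its observable behaviour, shown from the new import site (return values follow from the implementation above and the tests removed below):

```typescript
import { hasLongSequentialRepeat } from '@n8n/ai-utilities';

// Same semantics as the removed local copy: a run of at least `threshold`
// identical characters anywhere in the text trips the guard.
console.log(hasLongSequentialRepeat('a'.repeat(1000))); // true (default threshold: 1000)
console.log(hasLongSequentialRepeat('ababab'.repeat(500))); // false: repeats are not sequential
console.log(hasLongSequentialRepeat('x'.repeat(50), 30)); // true with a lower threshold
```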
@@ -8,7 +8,6 @@ import { z } from 'zod';
 import {
 	escapeSingleCurlyBrackets,
 	getConnectedTools,
-	hasLongSequentialRepeat,
 	unwrapNestedOutput,
 	getSessionId,
 } from '../helpers';
@@ -486,107 +485,3 @@ describe('getSessionId', () => {
 		expect(sessionId).toBe('customSessionId');
 	});
 });
-
-describe('hasLongSequentialRepeat', () => {
-	it('should return false for text shorter than threshold', () => {
-		const text = 'a'.repeat(99);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(false);
-	});
-
-	it('should return false for normal text without repeats', () => {
-		const text = 'This is a normal text without many sequential repeating characters.';
-		expect(hasLongSequentialRepeat(text)).toBe(false);
-	});
-
-	it('should return true for text with exactly threshold repeats', () => {
-		const text = 'a'.repeat(100);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should return true for text with more than threshold repeats', () => {
-		const text = 'b'.repeat(150);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should detect repeats in the middle of text', () => {
-		const text = 'Normal text ' + 'x'.repeat(100) + ' more normal text';
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should detect repeats at the end of text', () => {
-		const text = 'Normal text at the beginning' + 'z'.repeat(100);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should work with different thresholds', () => {
-		const text = 'a'.repeat(50);
-		expect(hasLongSequentialRepeat(text, 30)).toBe(true);
-		expect(hasLongSequentialRepeat(text, 60)).toBe(false);
-	});
-
-	it('should handle special characters', () => {
-		const text = '.'.repeat(100);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should handle spaces', () => {
-		const text = ' '.repeat(100);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should handle newlines', () => {
-		const text = '\n'.repeat(100);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	it('should not detect non-sequential repeats', () => {
-		const text = 'ababab'.repeat(50); // 300 chars but no sequential repeats
-		expect(hasLongSequentialRepeat(text, 100)).toBe(false);
-	});
-
-	it('should handle mixed content with repeats below threshold', () => {
-		const text = 'aaa' + 'b'.repeat(50) + 'ccc' + 'd'.repeat(40) + 'eee';
-		expect(hasLongSequentialRepeat(text, 100)).toBe(false);
-	});
-
-	it('should handle empty string', () => {
-		expect(hasLongSequentialRepeat('', 100)).toBe(false);
-	});
-
-	it('should work with very large texts', () => {
-		const normalText = 'Lorem ipsum dolor sit amet '.repeat(1000);
-		const textWithRepeat = normalText + 'A'.repeat(100) + normalText;
-		expect(hasLongSequentialRepeat(textWithRepeat, 100)).toBe(true);
-	});
-
-	it('should detect unicode character repeats', () => {
-		const text = '😀'.repeat(100);
-		expect(hasLongSequentialRepeat(text, 100)).toBe(true);
-	});
-
-	describe('error handling', () => {
-		it('should handle null input', () => {
-			expect(hasLongSequentialRepeat(null as any)).toBe(false);
-		});
-
-		it('should handle undefined input', () => {
-			expect(hasLongSequentialRepeat(undefined as any)).toBe(false);
-		});
-
-		it('should handle non-string input', () => {
-			expect(hasLongSequentialRepeat(123 as any)).toBe(false);
-			expect(hasLongSequentialRepeat({} as any)).toBe(false);
-			expect(hasLongSequentialRepeat([] as any)).toBe(false);
-		});
-
-		it('should handle zero or negative threshold', () => {
-			const text = 'a'.repeat(100);
-			expect(hasLongSequentialRepeat(text, 0)).toBe(false);
-			expect(hasLongSequentialRepeat(text, -1)).toBe(false);
-		});
-
-		it('should handle empty string', () => {
-			expect(hasLongSequentialRepeat('', 100)).toBe(false);
-		});
-	});
-});
@@ -120,6 +120,9 @@ catalogs:
     js-base64:
       specifier: 3.7.2
      version: 3.7.2
+    js-tiktoken:
+      specifier: 1.0.12
+      version: 1.0.12
     jsonrepair:
       specifier: 3.13.1
       version: 3.13.1
@@ -504,15 +507,27 @@ importers:
       '@n8n/typescript-config':
         specifier: workspace:*
         version: link:../typescript-config
+      https-proxy-agent:
+        specifier: 'catalog:'
+        version: 7.0.6
+      js-tiktoken:
+        specifier: 'catalog:'
+        version: 1.0.12
+      langchain:
+        specifier: 'catalog:'
+        version: 1.2.3(@langchain/core@1.1.8(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.204.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.5.0(@opentelemetry/api@1.9.0))(openai@6.9.1(ws@8.18.3(bufferutil@4.0.9)(utf-8-validate@5.0.10))(zod@3.25.67)))(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.204.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.5.0(@opentelemetry/api@1.9.0))(openai@6.9.1(ws@8.18.3(bufferutil@4.0.9)(utf-8-validate@5.0.10))(zod@3.25.67))(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(zod-to-json-schema@3.23.3(zod@3.25.67))
       n8n-workflow:
         specifier: workspace:*
         version: link:../../workflow
+      proxy-from-env:
+        specifier: ^1.1.0
+        version: 1.1.0
       tmp-promise:
         specifier: 3.0.3
         version: 3.0.3
       undici:
         specifier: ^6.23.0
         version: 6.23.0
       zod:
         specifier: 3.25.67
         version: 3.25.67
@@ -1484,14 +1499,11 @@ importers:
       html-to-text:
         specifier: 9.0.5
         version: 9.0.5
-      https-proxy-agent:
-        specifier: 'catalog:'
-        version: 7.0.6
       ignore:
         specifier: ^5.2.0
         version: 5.2.4
       js-tiktoken:
-        specifier: ^1.0.12
+        specifier: 'catalog:'
         version: 1.0.12
       jsdom:
         specifier: 23.0.1
@@ -1526,9 +1538,6 @@ importers:
       pg:
         specifier: 'catalog:'
         version: 8.17.0
-      proxy-from-env:
-        specifier: ^1.1.0
-        version: 1.1.0
       redis:
         specifier: 4.6.14
         version: 4.6.14
@@ -1544,9 +1553,6 @@ importers:
       tmp-promise:
         specifier: 3.0.3
         version: 3.0.3
-      undici:
-        specifier: ^6.23.0
-        version: 6.23.0
       vm2:
         specifier: 'catalog:'
         version: 3.10.2
@@ -81,6 +81,7 @@ catalog:
   xss: 1.0.15
   zod: 3.25.67
   zod-to-json-schema: 3.23.3
+  js-tiktoken: 1.0.12

 catalogs:
   frontend: