improvement(executor): redesign executor + add start block (#1790)

* fix(billing): should allow restoring subscription (#1728)

* fix(already-cancelled-sub): UI should allow restoring subscription

* restore functionality fixed

* fix

* improvement(start): revert to start block

* make it work with start block

* fix start block persistence

* cleanup triggers

* debounce status checks

* update docs

* improvement(start): revert to start block

* make it work with start block

* fix start block persistence

* cleanup triggers

* debounce status checks

* update docs

* SSE v0.1

* v0.2

* v0.3

* v0.4

* v0.5

* v0.6

* broken checkpoint

* Executor progress - everything preliminarily tested except while loops and triggers

* Executor fixes

* Fix var typing

* Implement while loop execution

* Loop and parallel result agg

* Refactor v1 - loops work

* Fix var resolution in for each loop

* Fix while loop condition and variable resolution

* Fix loop iteration counts

* Fix loop badges

* Clean logs

* Fix variable references from start block

* Fix condition block

* Fix conditional convergence

* Don't execute orphaned nodes

* Code cleanup 1 and error surfacing

* compile time try catch

* Some fixes

* Fix error throwing

* Sentinels v1

* Fix multiple start and end nodes in loop

* Edge restoration

* Fix reachable nodes execution

* Parallel subflows

* Fix loop/parallel sentinel convergence

* Loops and parallels orchestrator

* Split executor

* Variable resolution split

* Dag phase

* Refactor

* Refactor

* Refactor 3

* Lint + refactor

* Lint + cleanup + refactor

* Readability

* Initial logs

* Fix trace spans

* Console pills for iters

* Add input/output pills

* Checkpoint

* remove unused code

* THIS IS THE COMMIT THAT CAN BREAK A LOT OF THINGS

* ANOTHER BIG REFACTOR

* Lint + fix tests

* Fix webhook

* Remove comment

* Merge stash

* Fix triggers?

* Stuff

* Fix error port

* Lint

* Consolidate state

* Clean up some var resolution

* Remove some var resolution logs

* Fix chat

* Fix chat triggers

* Fix chat trigger fully

* Snapshot refactor

* Fix mcp and custom tools

* Lint

* Fix parallel default count and trace span overlay

* Agent purple

* Fix test

* Fix test

---------

Co-authored-by: Waleed <walif6@gmail.com>
Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Vikhyath Mondreti <vikhyath@simstudio.ai>
This commit is contained in:
Siddharth Ganesan
2025-11-02 12:21:16 -08:00
committed by GitHub
parent 7d67ae397d
commit 3bf00cbd2a
137 changed files with 8552 additions and 20440 deletions

View File

@@ -1,39 +1,335 @@
/**
 * Central constants and types for the executor
 *
 * Consolidates all magic strings, block types, edge handles, and type definitions
 * used throughout the executor to eliminate duplication and improve type safety.
 */

/**
 * Block types
 */
/**
 * Enum of all supported block types in the executor.
 *
 * Note: the diff-interleaved version of this enum declared STARTER, RESPONSE,
 * WORKFLOW, and WORKFLOW_INPUT twice, which is a TypeScript compile error
 * (duplicate identifier). Each member appears exactly once here.
 */
export enum BlockType {
  // Control flow
  PARALLEL = 'parallel',
  LOOP = 'loop',
  ROUTER = 'router',
  CONDITION = 'condition',

  // Triggers
  START_TRIGGER = 'start_trigger',
  STARTER = 'starter',
  TRIGGER = 'trigger',

  // Data processing
  FUNCTION = 'function',
  AGENT = 'agent',
  API = 'api',
  EVALUATOR = 'evaluator',
  VARIABLES = 'variables',

  // I/O
  RESPONSE = 'response',
  WORKFLOW = 'workflow', // Deprecated - kept for backwards compatibility
  WORKFLOW_INPUT = 'workflow_input', // Current workflow block type

  // Utilities
  WAIT = 'wait',

  // Infrastructure (virtual blocks inserted by the DAG builder, never authored)
  SENTINEL_START = 'sentinel_start',
  SENTINEL_END = 'sentinel_end',
}
/**
* Array of all block types for iteration and validation
* Trigger block types (blocks that can start a workflow)
*/
export const ALL_BLOCK_TYPES = Object.values(BlockType) as string[]
export const TRIGGER_BLOCK_TYPES = [
BlockType.START_TRIGGER,
BlockType.STARTER,
BlockType.TRIGGER,
] as const
/**
* Type guard to check if a string is a valid block type
* Metadata-only block types (not executable, just configuration)
*/
export function isValidBlockType(type: string): type is BlockType {
return ALL_BLOCK_TYPES.includes(type)
export const METADATA_ONLY_BLOCK_TYPES = [BlockType.LOOP, BlockType.PARALLEL] as const
/**
 * Loop types: fixed count ('for'), collection iteration ('forEach'),
 * and condition-checked loops ('while' / 'doWhile').
 */
export type LoopType = 'for' | 'forEach' | 'while' | 'doWhile'

/**
 * Sentinel types: whether a virtual sentinel node marks the loop's
 * entry ('start') or exit ('end').
 */
export type SentinelType = 'start' | 'end'

/**
 * Parallel types: branch per item of a collection ('collection') or a
 * fixed number of branches ('count').
 */
export type ParallelType = 'collection' | 'count'
// Edge source-handle identifiers used when wiring DAG edges.
export const EDGE = {
  // Condition blocks emit handles of the form 'condition-<conditionId>'
  // (see EdgeConstructor.generateSourceHandle).
  CONDITION_PREFIX: 'condition-',
  CONDITION_TRUE: 'condition-true',
  CONDITION_FALSE: 'condition-false',
  // Router blocks emit handles of the form 'router-<targetBlockId>'.
  ROUTER_PREFIX: 'router-',
  // Handle on the loop sentinel-end -> sentinel-start backward edge.
  LOOP_CONTINUE: 'loop_continue',
  // NOTE(review): looks like an alternate/legacy spelling of the continue
  // handle — confirm which producers still emit it.
  LOOP_CONTINUE_ALT: 'loop-continue-source',
  // Handle on edges leaving a loop through its sentinel-end node.
  LOOP_EXIT: 'loop_exit',
  ERROR: 'error',
  SOURCE: 'source',
  DEFAULT: 'default',
} as const
/**
 * Loop configuration: canonical loop-type values and the naming scheme for
 * the virtual sentinel nodes that bracket each loop.
 */
export const LOOP = {
  // Loop types
  TYPE: {
    FOR: 'for' as LoopType,
    FOR_EACH: 'forEach' as LoopType,
    WHILE: 'while' as LoopType,
    // `as LoopType` added for consistency — DO_WHILE was the only member
    // without it, so its inferred type differed from its siblings.
    DO_WHILE: 'doWhile' as LoopType,
  },
  // Sentinel node naming (ids are built in subflow-utils)
  SENTINEL: {
    PREFIX: 'loop-',
    START_SUFFIX: '-sentinel-start',
    END_SUFFIX: '-sentinel-end',
    START_TYPE: 'start' as SentinelType,
    END_TYPE: 'end' as SentinelType,
  },
} as const
/**
 * Parallel configuration
 */
export const PARALLEL = {
  // Parallel types
  TYPE: {
    COLLECTION: 'collection' as ParallelType,
    COUNT: 'count' as ParallelType,
  },
  // Branch notation — subscript parentheses; presumably used by
  // buildBranchNodeId in subflow-utils to suffix branch node ids — confirm.
  BRANCH: {
    PREFIX: '₍',
    SUFFIX: '₎',
  },
  // Default values
  DEFAULT_COUNT: 1,
} as const
/**
 * Reference syntax for variable resolution
 */
export const REFERENCE = {
  // Delimiters wrapping a reference: <path.to.value>
  START: '<',
  END: '>',
  // Separator between path segments inside a reference
  PATH_DELIMITER: '.',
  // Delimiters wrapping an environment-variable reference: {{NAME}}
  ENV_VAR_START: '{{',
  ENV_VAR_END: '}}',
  // First path segment for references that do not name a block
  PREFIX: {
    LOOP: 'loop',
    PARALLEL: 'parallel',
    VARIABLE: 'variable',
  },
} as const

// Reference prefixes that are resolved specially instead of as block ids.
export const SPECIAL_REFERENCE_PREFIXES = [
  REFERENCE.PREFIX.LOOP,
  REFERENCE.PREFIX.PARALLEL,
  REFERENCE.PREFIX.VARIABLE,
] as const
/**
 * Loop reference fields — property names usable after the 'loop.' prefix,
 * e.g. <loop.index> or <loop.item>.
 */
export const LOOP_REFERENCE = {
  ITERATION: 'iteration',
  INDEX: 'index',
  ITEM: 'item',
  // Full dotted path for the index, e.g. used when composing conditions
  INDEX_PATH: 'loop.index',
} as const

/**
 * Parallel reference fields — property names usable after the 'parallel.'
 * prefix, e.g. <parallel.currentItem>.
 */
export const PARALLEL_REFERENCE = {
  INDEX: 'index',
  CURRENT_ITEM: 'currentItem',
  ITEMS: 'items',
} as const
// Fallback values used when workflow/block data is missing, plus safety caps.
export const DEFAULTS = {
  BLOCK_TYPE: 'unknown',
  BLOCK_TITLE: 'Untitled Block',
  WORKFLOW_NAME: 'Workflow',
  // Safety caps on loop iterations and nested-workflow depth
  MAX_LOOP_ITERATIONS: 1000,
  MAX_WORKFLOW_DEPTH: 10,
  EXECUTION_TIME: 0,
  // Zeroed token-usage record (mirrored by getDefaultTokens)
  TOKENS: {
    PROMPT: 0,
    COMPLETION: 0,
    TOTAL: 0,
  },
  // Zeroed cost record (mirrored by getDefaultCost)
  COST: {
    INPUT: 0,
    OUTPUT: 0,
    TOTAL: 0,
  },
} as const
// HTTP status codes and content types used by the executor.
export const HTTP = {
  STATUS: {
    OK: 200,
    FORBIDDEN: 403,
    NOT_FOUND: 404,
    TOO_MANY_REQUESTS: 429,
    SERVER_ERROR: 500,
  },
  CONTENT_TYPE: {
    JSON: 'application/json',
    // Server-sent events (streaming responses)
    EVENT_STREAM: 'text/event-stream',
  },
} as const
// Agent block defaults. Timeouts are presumably milliseconds — TODO confirm.
export const AGENT = {
  DEFAULT_MODEL: 'gpt-4o',
  DEFAULT_FUNCTION_TIMEOUT: 5000,
  REQUEST_TIMEOUT: 120000,
  // Prefix identifying custom (user-defined) tools among an agent's tools
  CUSTOM_TOOL_PREFIX: 'custom_',
} as const

// Router block defaults for route-selection inference.
export const ROUTER = {
  DEFAULT_MODEL: 'gpt-4o',
  DEFAULT_TEMPERATURE: 0,
  INFERENCE_TEMPERATURE: 0.1,
} as const

// Evaluator block defaults.
export const EVALUATOR = {
  DEFAULT_MODEL: 'gpt-4o',
  DEFAULT_TEMPERATURE: 0.1,
  // Name of the structured-output schema requested from the model
  RESPONSE_SCHEMA_NAME: 'evaluation_response',
  // Indent passed to JSON.stringify when formatting evaluator payloads
  JSON_INDENT: 2,
} as const

// Labels for the fallback branch of a condition block.
export const CONDITION = {
  ELSE_LABEL: 'else',
  ELSE_TITLE: 'else',
} as const

// Misc parsing constants.
export const PARSING = {
  // Radix for parseInt-style numeric parsing
  JSON_RADIX: 10,
  // Truncation length/suffix for logging previews of large values
  PREVIEW_LENGTH: 200,
  PREVIEW_SUFFIX: '...',
} as const
/**
 * Condition configuration — one entry of a condition block's `conditions`
 * param (each entry is an expression with an id and optional display label).
 */
export interface ConditionConfig {
  // Stable id; edge source handles are built as 'condition-<id>'
  id: string
  // Optional human-readable label
  label?: string
  // The condition expression to evaluate
  condition: string
}
/**
 * Checks whether a block type can start a workflow.
 *
 * @param blockType - Block type string, or undefined when metadata is absent
 * @returns true if the type is one of TRIGGER_BLOCK_TYPES
 */
export function isTriggerBlockType(blockType: string | undefined): boolean {
  // Widen the readonly tuple to string[] rather than casting the argument to
  // `any`. undefined can never be a member, so it short-circuits to false —
  // same behavior as the previous includes(blockType as any).
  return blockType !== undefined && (TRIGGER_BLOCK_TYPES as readonly string[]).includes(blockType)
}

/**
 * Checks whether a block type is configuration-only (never executed).
 *
 * @param blockType - Block type string, or undefined when metadata is absent
 * @returns true if the type is one of METADATA_ONLY_BLOCK_TYPES
 */
export function isMetadataOnlyBlockType(blockType: string | undefined): boolean {
  return (
    blockType !== undefined &&
    (METADATA_ONLY_BLOCK_TYPES as readonly string[]).includes(blockType)
  )
}
/**
 * Helper to check if a block type is a workflow block (current or deprecated)
 */
export function isWorkflowBlockType(blockType: string | undefined): boolean {
  switch (blockType) {
    case BlockType.WORKFLOW:
    case BlockType.WORKFLOW_INPUT:
      return true
    default:
      return false
  }
}

/** Helper to check if a block type is one of the virtual sentinel blocks */
export function isSentinelBlockType(blockType: string | undefined): boolean {
  switch (blockType) {
    case BlockType.SENTINEL_START:
    case BlockType.SENTINEL_END:
      return true
    default:
      return false
  }
}

/** Helper to check if a block type is a condition block */
export function isConditionBlockType(blockType: string | undefined): boolean {
  return BlockType.CONDITION === blockType
}

/** Helper to check if a block type is a router block */
export function isRouterBlockType(blockType: string | undefined): boolean {
  return BlockType.ROUTER === blockType
}

/** Helper to check if a block type is an agent block */
export function isAgentBlockType(blockType: string | undefined): boolean {
  return BlockType.AGENT === blockType
}
/** Returns a fresh zeroed token-usage record (prompt/completion/total). */
export function getDefaultTokens() {
  const { PROMPT, COMPLETION, TOTAL } = DEFAULTS.TOKENS
  return { prompt: PROMPT, completion: COMPLETION, total: TOTAL }
}

/** Returns a fresh zeroed cost record (input/output/total). */
export function getDefaultCost() {
  const { INPUT, OUTPUT, TOTAL } = DEFAULTS.COST
  return { input: INPUT, output: OUTPUT, total: TOTAL }
}
/** Wraps a dotted path in reference delimiters, e.g. `a.b` -> `<a.b>`. */
export function buildReference(path: string): string {
  return [REFERENCE.START, path, REFERENCE.END].join('')
}

/** Builds a `<loop.property>` reference. */
export function buildLoopReference(property: string): string {
  const path = [REFERENCE.PREFIX.LOOP, property].join(REFERENCE.PATH_DELIMITER)
  return buildReference(path)
}

/** Builds a `<parallel.property>` reference. */
export function buildParallelReference(property: string): string {
  const path = [REFERENCE.PREFIX.PARALLEL, property].join(REFERENCE.PATH_DELIMITER)
  return buildReference(path)
}

/** Builds a `<variable.name>` reference. */
export function buildVariableReference(variableName: string): string {
  const path = [REFERENCE.PREFIX.VARIABLE, variableName].join(REFERENCE.PATH_DELIMITER)
  return buildReference(path)
}

/** Builds a `<blockId>` reference, or `<blockId.path>` when a path is given. */
export function buildBlockReference(blockId: string, path?: string): string {
  if (path) {
    return buildReference([blockId, path].join(REFERENCE.PATH_DELIMITER))
  }
  return buildReference(blockId)
}

/** Builds a loop condition string of the form `<loop.index> < N`. */
export function buildLoopIndexCondition(maxIterations: number): string {
  const indexRef = buildLoopReference(LOOP_REFERENCE.INDEX)
  return `${indexRef} < ${maxIterations}`
}

/** Wraps a name in env-var delimiters, e.g. `KEY` -> `{{KEY}}`. */
export function buildEnvVarReference(varName: string): string {
  return [REFERENCE.ENV_VAR_START, varName, REFERENCE.ENV_VAR_END].join('')
}

/** True when the value is delimited like `<...>`. */
export function isReference(value: string): boolean {
  const startsOk = value.startsWith(REFERENCE.START)
  return startsOk && value.endsWith(REFERENCE.END)
}

/** True when the value is delimited like `{{...}}`. */
export function isEnvVarReference(value: string): boolean {
  const startsOk = value.startsWith(REFERENCE.ENV_VAR_START)
  return startsOk && value.endsWith(REFERENCE.ENV_VAR_END)
}

/** Strips the `{{` and `}}` delimiters from an env-var reference. */
export function extractEnvVarName(reference: string): string {
  const start = REFERENCE.ENV_VAR_START.length
  const end = reference.length - REFERENCE.ENV_VAR_END.length
  return reference.substring(start, end)
}

/** Strips the `<` and `>` delimiters from a reference. */
export function extractReferenceContent(reference: string): string {
  const start = REFERENCE.START.length
  const end = reference.length - REFERENCE.END.length
  return reference.substring(start, end)
}

/** Splits `<a.b.c>` into its path segments: ['a', 'b', 'c']. */
export function parseReferencePath(reference: string): string[] {
  return extractReferenceContent(reference).split(REFERENCE.PATH_DELIMITER)
}

View File

@@ -0,0 +1,84 @@
import { createLogger } from '@/lib/logs/console/logger'
import type {
SerializedBlock,
SerializedLoop,
SerializedParallel,
SerializedWorkflow,
} from '@/serializer/types'
import { EdgeConstructor } from './construction/edges'
import { LoopConstructor } from './construction/loops'
import { NodeConstructor } from './construction/nodes'
import { PathConstructor } from './construction/paths'
import type { DAGEdge, NodeMetadata } from './types'
const logger = createLogger('DAGBuilder')
/** A single executable node in the built DAG. */
export interface DAGNode {
  // Node id; equals the block id, or a derived id for parallel branches
  // and loop sentinels
  id: string
  // The serialized block this node executes (shallow-copied for branches)
  block: SerializedBlock
  // Ids of nodes with an edge into this node (loop backward edges are
  // deliberately excluded by the edge constructor's addEdge)
  incomingEdges: Set<string>
  // Outgoing edges keyed by edge id
  outgoingEdges: Map<string, DAGEdge>
  // Structural metadata: loop membership, parallel branch info, sentinel flags
  metadata: NodeMetadata
}

/** The complete DAG plus the loop/parallel configs it was built from. */
export interface DAG {
  nodes: Map<string, DAGNode>
  loopConfigs: Map<string, SerializedLoop>
  parallelConfigs: Map<string, SerializedParallel>
}
/**
 * Builds the executable DAG for a workflow in four phases: reachability from
 * the trigger, loop sentinel creation, node creation (with parallel-branch
 * expansion), and edge wiring.
 */
export class DAGBuilder {
  private pathConstructor = new PathConstructor()
  private loopConstructor = new LoopConstructor()
  private nodeConstructor = new NodeConstructor()
  private edgeConstructor = new EdgeConstructor()

  /**
   * Constructs the DAG for the given serialized workflow.
   *
   * @param workflow - Serialized workflow definition
   * @param triggerBlockId - Optional id of the block execution starts from
   * @returns The fully wired DAG
   */
  build(workflow: SerializedWorkflow, triggerBlockId?: string): DAG {
    const result: DAG = {
      nodes: new Map(),
      loopConfigs: new Map(),
      parallelConfigs: new Map(),
    }
    this.initializeConfigs(workflow, result)

    // Phase 1: find all blocks reachable from the trigger.
    const reachable = this.pathConstructor.execute(workflow, triggerBlockId)
    logger.debug('Reachable blocks from trigger:', {
      triggerBlockId,
      reachableCount: reachable.size,
      totalBlocks: workflow.blocks.length,
    })

    // Phase 2: create sentinel node pairs for reachable loops.
    this.loopConstructor.execute(result, reachable)

    // Phase 3: create DAG nodes, expanding parallel branches.
    const { blocksInLoops, blocksInParallels } = this.nodeConstructor.execute(
      workflow,
      result,
      reachable
    )

    // Phase 4: wire all edges (regular, sentinel, and parallel fan-out/in).
    this.edgeConstructor.execute(workflow, result, blocksInParallels, blocksInLoops, reachable)

    logger.info('DAG built', {
      totalNodes: result.nodes.size,
      loopCount: result.loopConfigs.size,
      parallelCount: result.parallelConfigs.size,
    })
    return result
  }

  /** Copies the workflow's loop and parallel configs into the DAG's maps. */
  private initializeConfigs(workflow: SerializedWorkflow, dag: DAG): void {
    for (const [loopId, loopConfig] of Object.entries(workflow.loops ?? {})) {
      dag.loopConfigs.set(loopId, loopConfig)
    }
    for (const [parallelId, parallelConfig] of Object.entries(workflow.parallels ?? {})) {
      dag.parallelConfigs.set(parallelId, parallelConfig)
    }
  }
}

View File

@@ -0,0 +1,509 @@
import { createLogger } from '@/lib/logs/console/logger'
import { EDGE, isConditionBlockType, isRouterBlockType } from '@/executor/consts'
import {
buildBranchNodeId,
buildSentinelEndId,
buildSentinelStartId,
calculateBranchCount,
extractBaseBlockId,
parseDistributionItems,
} from '@/executor/utils/subflow-utils'
import type { SerializedWorkflow } from '@/serializer/types'
import type { DAG } from '../builder'
const logger = createLogger('EdgeConstructor')
/** Parsed entry from a condition block's `conditions` param. */
interface ConditionConfig {
  id: string
  label?: string
  condition: string
}

/** Per-workflow lookup tables built once before edge wiring. */
interface EdgeMetadata {
  // block id -> block type (metadata.id, '' when absent)
  blockTypeMap: Map<string, string>
  // condition block id -> its parsed condition entries
  conditionConfigMap: Map<string, ConditionConfig[]>
  // ids of router blocks
  routerBlockIds: Set<string>
}
/**
 * Wires all edges of the DAG: regular block-to-block edges, loop sentinel
 * edges (including the backward "continue" edge), and parallel-branch
 * fan-out/fan-in edges. Nodes must already exist in the DAG.
 */
export class EdgeConstructor {
  /**
   * Entry point: wires every workflow connection into the DAG.
   *
   * @param workflow - Serialized workflow (source of connections)
   * @param dag - DAG whose nodes were already created
   * @param blocksInParallels - Reachable block ids that live inside a parallel
   * @param blocksInLoops - Reachable block ids that live inside a loop
   * @param reachableBlocks - Block ids reachable from the trigger
   */
  execute(
    workflow: SerializedWorkflow,
    dag: DAG,
    blocksInParallels: Set<string>,
    blocksInLoops: Set<string>,
    reachableBlocks: Set<string>
  ): void {
    const loopBlockIds = new Set(dag.loopConfigs.keys())
    const parallelBlockIds = new Set(dag.parallelConfigs.keys())
    const metadata = this.buildMetadataMaps(workflow)
    this.wireRegularEdges(
      workflow,
      dag,
      blocksInParallels,
      blocksInLoops,
      reachableBlocks,
      loopBlockIds,
      parallelBlockIds,
      metadata
    )
    this.wireLoopSentinels(dag, reachableBlocks)
    this.wireParallelBlocks(workflow, dag, loopBlockIds, parallelBlockIds)
  }

  /** Builds the type/condition/router lookup tables in one pass over blocks. */
  private buildMetadataMaps(workflow: SerializedWorkflow): EdgeMetadata {
    const blockTypeMap = new Map<string, string>()
    const conditionConfigMap = new Map<string, ConditionConfig[]>()
    const routerBlockIds = new Set<string>()
    for (const block of workflow.blocks) {
      const blockType = block.metadata?.id ?? ''
      blockTypeMap.set(block.id, blockType)
      if (isConditionBlockType(blockType)) {
        const conditions = this.parseConditionConfig(block)
        if (conditions) {
          conditionConfigMap.set(block.id, conditions)
        }
      } else if (isRouterBlockType(blockType)) {
        routerBlockIds.add(block.id)
      }
    }
    return { blockTypeMap, conditionConfigMap, routerBlockIds }
  }

  /**
   * Extracts a condition block's condition entries from its params.
   * Accepts either a JSON string or an already-parsed array; returns null
   * (and logs a warning) on malformed input.
   */
  private parseConditionConfig(block: any): ConditionConfig[] | null {
    try {
      const conditionsJson = block.config.params?.conditions
      if (typeof conditionsJson === 'string') {
        return JSON.parse(conditionsJson)
      }
      if (Array.isArray(conditionsJson)) {
        return conditionsJson
      }
      return null
    } catch (error) {
      logger.warn('Failed to parse condition config', {
        blockId: block.id,
        error: error instanceof Error ? error.message : String(error),
      })
      return null
    }
  }

  /**
   * Resolves the effective source handle for an edge.
   * - Condition blocks without an explicit handle: matched positionally —
   *   the i-th outgoing connection gets the i-th condition's id.
   * - Router blocks: the handle always encodes the target block id,
   *   overriding any explicit handle.
   */
  private generateSourceHandle(
    source: string,
    target: string,
    sourceHandle: string | undefined,
    metadata: EdgeMetadata,
    workflow: SerializedWorkflow
  ): string | undefined {
    let handle = sourceHandle
    if (!handle && isConditionBlockType(metadata.blockTypeMap.get(source) ?? '')) {
      const conditions = metadata.conditionConfigMap.get(source)
      if (conditions && conditions.length > 0) {
        const edgesFromCondition = workflow.connections.filter((c) => c.source === source)
        const edgeIndex = edgesFromCondition.findIndex((e) => e.target === target)
        if (edgeIndex >= 0 && edgeIndex < conditions.length) {
          const correspondingCondition = conditions[edgeIndex]
          handle = `${EDGE.CONDITION_PREFIX}${correspondingCondition.id}`
        }
      }
    }
    if (metadata.routerBlockIds.has(source)) {
      handle = `${EDGE.ROUTER_PREFIX}${target}`
      logger.debug('Set router sourceHandle', { source, target, sourceHandle: handle })
    }
    return handle
  }

  /**
   * Wires plain workflow connections. Edges touching a loop block are
   * redirected through the loop's sentinels; edges touching a parallel block
   * are deferred to wireParallelBlocks; edges that cross a loop boundary,
   * are unreachable, or leak out of a parallel are dropped.
   */
  private wireRegularEdges(
    workflow: SerializedWorkflow,
    dag: DAG,
    blocksInParallels: Set<string>,
    blocksInLoops: Set<string>,
    reachableBlocks: Set<string>,
    loopBlockIds: Set<string>,
    parallelBlockIds: Set<string>,
    metadata: EdgeMetadata
  ): void {
    for (const connection of workflow.connections) {
      // source/target may be rewritten below when sentinels are involved.
      let { source, target } = connection
      let sourceHandle = this.generateSourceHandle(
        source,
        target,
        connection.sourceHandle,
        metadata,
        workflow
      )
      const targetHandle = connection.targetHandle
      const sourceIsLoopBlock = loopBlockIds.has(source)
      const targetIsLoopBlock = loopBlockIds.has(target)
      const sourceIsParallelBlock = parallelBlockIds.has(source)
      const targetIsParallelBlock = parallelBlockIds.has(target)
      if (
        sourceIsLoopBlock ||
        targetIsLoopBlock ||
        sourceIsParallelBlock ||
        targetIsParallelBlock
      ) {
        // Edge out of a loop block leaves via the loop's sentinel-end node.
        if (sourceIsLoopBlock) {
          const sentinelEndId = buildSentinelEndId(source)
          if (!dag.nodes.has(sentinelEndId)) {
            logger.debug('Skipping loop exit edge - sentinel not found', { source, target })
            continue
          }
          source = sentinelEndId
          sourceHandle = EDGE.LOOP_EXIT
          logger.debug('Redirected loop exit edge', { from: sentinelEndId, to: target })
        }
        // Edge into a loop block enters via the loop's sentinel-start node.
        if (targetIsLoopBlock) {
          const sentinelStartId = buildSentinelStartId(target)
          if (!dag.nodes.has(sentinelStartId)) {
            logger.debug('Skipping loop entry edge - sentinel not found', { source, target })
            continue
          }
          target = sentinelStartId
          logger.debug('Redirected loop entry edge', { from: source, to: sentinelStartId })
        }
        // Parallel-block edges are wired separately in wireParallelBlocks.
        if (sourceIsParallelBlock || targetIsParallelBlock) {
          continue
        }
      }
      if (this.edgeCrossesLoopBoundary(source, target, blocksInLoops, dag)) {
        logger.debug('Skipping edge that crosses loop boundary', { source, target })
        continue
      }
      if (!this.isEdgeReachable(source, target, reachableBlocks, dag)) {
        logger.debug('Skipping edge - not reachable', { source, target })
        continue
      }
      if (blocksInParallels.has(source) && blocksInParallels.has(target)) {
        // Both endpoints are inside parallels: only same-parallel edges are
        // valid, and they are replicated per branch.
        const sourceParallelId = this.getParallelId(source, dag)
        const targetParallelId = this.getParallelId(target, dag)
        if (sourceParallelId === targetParallelId) {
          this.wireParallelInternalEdge(
            source,
            target,
            sourceParallelId!,
            dag,
            sourceHandle,
            targetHandle
          )
        } else {
          logger.warn('Edge between different parallels - invalid workflow', { source, target })
        }
      } else if (blocksInParallels.has(source) || blocksInParallels.has(target)) {
        logger.debug('Skipping internal-to-external edge (handled by parallel wiring)', {
          source,
          target,
        })
      } else {
        this.addEdge(dag, source, target, sourceHandle, targetHandle)
      }
    }
  }

  /**
   * Connects each loop's sentinel-start to the loop's first nodes, its last
   * nodes to sentinel-end, and adds the backward sentinel-end -> sentinel-start
   * "continue" edge.
   */
  private wireLoopSentinels(dag: DAG, reachableBlocks: Set<string>): void {
    for (const [loopId, loopConfig] of dag.loopConfigs) {
      const nodes = loopConfig.nodes
      if (nodes.length === 0) continue
      const sentinelStartId = buildSentinelStartId(loopId)
      const sentinelEndId = buildSentinelEndId(loopId)
      if (!dag.nodes.has(sentinelStartId) || !dag.nodes.has(sentinelEndId)) {
        logger.debug('Skipping sentinel wiring for unreachable loop', { loopId })
        continue
      }
      const { startNodes, terminalNodes } = this.findLoopBoundaryNodes(nodes, dag, reachableBlocks)
      logger.debug('Wiring sentinel nodes for loop', {
        loopId,
        startNodes,
        terminalNodes,
      })
      for (const startNodeId of startNodes) {
        this.addEdge(dag, sentinelStartId, startNodeId)
      }
      for (const terminalNodeId of terminalNodes) {
        this.addEdge(dag, terminalNodeId, sentinelEndId)
      }
      // Backward edge drives the next iteration; flagged so it is excluded
      // from incomingEdges (keeps the graph acyclic for readiness checks).
      this.addEdge(dag, sentinelEndId, sentinelStartId, EDGE.LOOP_CONTINUE, undefined, true)
      logger.debug('Added backward edge for loop', { loopId })
    }
  }

  /**
   * Wires edges into and out of parallel blocks: an external predecessor
   * fans out to every branch copy of each entry node, and every branch copy
   * of each terminal node fans in to the external successor.
   */
  private wireParallelBlocks(
    workflow: SerializedWorkflow,
    dag: DAG,
    loopBlockIds: Set<string>,
    parallelBlockIds: Set<string>
  ): void {
    for (const [parallelId, parallelConfig] of dag.parallelConfigs) {
      const nodes = parallelConfig.nodes
      if (nodes.length === 0) continue
      const { entryNodes, terminalNodes, branchCount } = this.findParallelBoundaryNodes(
        nodes,
        parallelId,
        dag
      )
      logger.info('Wiring parallel block edges', {
        parallelId,
        entryNodes,
        terminalNodes,
        branchCount,
      })
      for (const connection of workflow.connections) {
        const { source, target, sourceHandle, targetHandle } = connection
        // Edge into the parallel block: fan out to all branch entry copies.
        if (target === parallelId) {
          if (loopBlockIds.has(source) || parallelBlockIds.has(source)) continue
          if (nodes.includes(source)) {
            logger.warn('Invalid: parallel block connected from its own internal node', {
              parallelId,
              source,
            })
            continue
          }
          logger.info('Wiring edge to parallel block', { source, parallelId, entryNodes })
          for (const entryNodeId of entryNodes) {
            for (let i = 0; i < branchCount; i++) {
              const branchNodeId = buildBranchNodeId(entryNodeId, i)
              if (dag.nodes.has(branchNodeId)) {
                this.addEdge(dag, source, branchNodeId, sourceHandle, targetHandle)
              }
            }
          }
        }
        // Edge out of the parallel block: fan in from all branch terminal copies.
        if (source === parallelId) {
          if (loopBlockIds.has(target) || parallelBlockIds.has(target)) continue
          if (nodes.includes(target)) {
            logger.warn('Invalid: parallel block connected to its own internal node', {
              parallelId,
              target,
            })
            continue
          }
          logger.info('Wiring edge from parallel block', { parallelId, target, terminalNodes })
          for (const terminalNodeId of terminalNodes) {
            for (let i = 0; i < branchCount; i++) {
              const branchNodeId = buildBranchNodeId(terminalNodeId, i)
              if (dag.nodes.has(branchNodeId)) {
                this.addEdge(dag, branchNodeId, target, sourceHandle, targetHandle)
              }
            }
          }
        }
      }
    }
  }

  /**
   * True when exactly one endpoint is inside a loop, or both are but in
   * different loops. Such edges must go through sentinels instead.
   */
  private edgeCrossesLoopBoundary(
    source: string,
    target: string,
    blocksInLoops: Set<string>,
    dag: DAG
  ): boolean {
    const sourceInLoop = blocksInLoops.has(source)
    const targetInLoop = blocksInLoops.has(target)
    if (sourceInLoop !== targetInLoop) {
      return true
    }
    if (!sourceInLoop && !targetInLoop) {
      return false
    }
    // Both endpoints are in loops — find which loop each belongs to.
    let sourceLoopId: string | undefined
    let targetLoopId: string | undefined
    for (const [loopId, loopConfig] of dag.loopConfigs) {
      if (loopConfig.nodes.includes(source)) {
        sourceLoopId = loopId
      }
      if (loopConfig.nodes.includes(target)) {
        targetLoopId = loopId
      }
    }
    return sourceLoopId !== targetLoopId
  }

  /**
   * An endpoint is acceptable if it is reachable OR already a DAG node
   * (covers synthetic nodes like sentinels that aren't in reachableBlocks).
   */
  private isEdgeReachable(
    source: string,
    target: string,
    reachableBlocks: Set<string>,
    dag: DAG
  ): boolean {
    if (!reachableBlocks.has(source) && !dag.nodes.has(source)) {
      return false
    }
    if (!reachableBlocks.has(target) && !dag.nodes.has(target)) {
      return false
    }
    return true
  }

  /**
   * Replicates an edge between two blocks of the same parallel across every
   * branch: branch i of the source connects to branch i of the target.
   *
   * @throws Error when the parallel's config is missing from the DAG
   */
  private wireParallelInternalEdge(
    source: string,
    target: string,
    parallelId: string,
    dag: DAG,
    sourceHandle?: string,
    targetHandle?: string
  ): void {
    const parallelConfig = dag.parallelConfigs.get(parallelId)
    if (!parallelConfig) {
      throw new Error(`Parallel config not found: ${parallelId}`)
    }
    const distributionItems = parseDistributionItems(parallelConfig)
    const count = calculateBranchCount(parallelConfig, distributionItems)
    for (let i = 0; i < count; i++) {
      const sourceNodeId = buildBranchNodeId(source, i)
      const targetNodeId = buildBranchNodeId(target, i)
      this.addEdge(dag, sourceNodeId, targetNodeId, sourceHandle, targetHandle)
    }
  }

  /**
   * Classifies loop members: start nodes have no incoming edge from inside
   * the loop; terminal nodes have no outgoing edge to inside the loop.
   * (reachableBlocks is currently unused here — kept for signature parity.)
   */
  private findLoopBoundaryNodes(
    nodes: string[],
    dag: DAG,
    reachableBlocks: Set<string>
  ): { startNodes: string[]; terminalNodes: string[] } {
    const nodesSet = new Set(nodes)
    const startNodesSet = new Set<string>()
    const terminalNodesSet = new Set<string>()
    for (const nodeId of nodes) {
      const node = dag.nodes.get(nodeId)
      if (!node) continue
      let hasIncomingFromLoop = false
      for (const incomingNodeId of node.incomingEdges) {
        if (nodesSet.has(incomingNodeId)) {
          hasIncomingFromLoop = true
          break
        }
      }
      if (!hasIncomingFromLoop) {
        startNodesSet.add(nodeId)
      }
    }
    for (const nodeId of nodes) {
      const node = dag.nodes.get(nodeId)
      if (!node) continue
      let hasOutgoingToLoop = false
      for (const [_, edge] of node.outgoingEdges) {
        if (nodesSet.has(edge.target)) {
          hasOutgoingToLoop = true
          break
        }
      }
      if (!hasOutgoingToLoop) {
        terminalNodesSet.add(nodeId)
      }
    }
    return {
      startNodes: Array.from(startNodesSet),
      terminalNodes: Array.from(terminalNodesSet),
    }
  }

  /**
   * Classifies parallel members by inspecting branch 0 as a representative:
   * entry nodes have no incoming edge from inside the parallel, terminal
   * nodes have no outgoing edge to inside it. Also resolves the branch count.
   *
   * @throws Error when the parallel's config is missing from the DAG
   */
  private findParallelBoundaryNodes(
    nodes: string[],
    parallelId: string,
    dag: DAG
  ): { entryNodes: string[]; terminalNodes: string[]; branchCount: number } {
    const nodesSet = new Set(nodes)
    const entryNodesSet = new Set<string>()
    const terminalNodesSet = new Set<string>()
    const parallelConfig = dag.parallelConfigs.get(parallelId)
    if (!parallelConfig) {
      throw new Error(`Parallel config not found: ${parallelId}`)
    }
    const distributionItems = parseDistributionItems(parallelConfig)
    const branchCount = calculateBranchCount(parallelConfig, distributionItems)
    for (const nodeId of nodes) {
      // Skip members that were never expanded into any branch node.
      let hasAnyBranch = false
      for (let i = 0; i < branchCount; i++) {
        if (dag.nodes.has(buildBranchNodeId(nodeId, i))) {
          hasAnyBranch = true
          break
        }
      }
      if (!hasAnyBranch) continue
      const firstBranchId = buildBranchNodeId(nodeId, 0)
      const firstBranchNode = dag.nodes.get(firstBranchId)
      if (!firstBranchNode) continue
      let hasIncomingFromParallel = false
      for (const incomingNodeId of firstBranchNode.incomingEdges) {
        const originalNodeId = extractBaseBlockId(incomingNodeId)
        if (nodesSet.has(originalNodeId)) {
          hasIncomingFromParallel = true
          break
        }
      }
      if (!hasIncomingFromParallel) {
        entryNodesSet.add(nodeId)
      }
    }
    for (const nodeId of nodes) {
      let hasAnyBranch = false
      for (let i = 0; i < branchCount; i++) {
        if (dag.nodes.has(buildBranchNodeId(nodeId, i))) {
          hasAnyBranch = true
          break
        }
      }
      if (!hasAnyBranch) continue
      const firstBranchId = buildBranchNodeId(nodeId, 0)
      const firstBranchNode = dag.nodes.get(firstBranchId)
      if (!firstBranchNode) continue
      let hasOutgoingToParallel = false
      for (const [_, edge] of firstBranchNode.outgoingEdges) {
        const originalTargetId = extractBaseBlockId(edge.target)
        if (nodesSet.has(originalTargetId)) {
          hasOutgoingToParallel = true
          break
        }
      }
      if (!hasOutgoingToParallel) {
        terminalNodesSet.add(nodeId)
      }
    }
    return {
      entryNodes: Array.from(entryNodesSet),
      terminalNodes: Array.from(terminalNodesSet),
      branchCount,
    }
  }

  /** Returns the id of the parallel containing blockId, or null. */
  private getParallelId(blockId: string, dag: DAG): string | null {
    for (const [parallelId, parallelConfig] of dag.parallelConfigs) {
      if (parallelConfig.nodes.includes(blockId)) {
        return parallelId
      }
    }
    return null
  }

  /**
   * Inserts one edge. Missing endpoints are logged and skipped. Loop backward
   * edges are created inactive and excluded from the target's incomingEdges
   * so readiness/convergence logic still sees an acyclic graph.
   */
  private addEdge(
    dag: DAG,
    sourceId: string,
    targetId: string,
    sourceHandle?: string,
    targetHandle?: string,
    isLoopBackEdge = false
  ): void {
    const sourceNode = dag.nodes.get(sourceId)
    const targetNode = dag.nodes.get(targetId)
    if (!sourceNode || !targetNode) {
      logger.warn('Edge references non-existent node', { sourceId, targetId })
      return
    }
    // NOTE(review): key is a separator-less concatenation, so distinct pairs
    // could theoretically collide ('ab'+'c' vs 'a'+'bc'). Presumably safe for
    // UUID-based ids, but confirm before relying on key uniqueness.
    const edgeId = `${sourceId}${targetId}`
    sourceNode.outgoingEdges.set(edgeId, {
      target: targetId,
      sourceHandle,
      targetHandle,
      isActive: isLoopBackEdge ? false : undefined,
    })
    if (!isLoopBackEdge) {
      targetNode.incomingEdges.add(sourceId)
      logger.debug('Added incoming edge', { from: sourceId, to: targetId })
    } else {
      logger.debug('Skipped adding backwards-edge to incomingEdges', {
        from: sourceId,
        to: targetId,
      })
    }
  }
}

View File

@@ -0,0 +1,85 @@
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType, LOOP, type SentinelType } from '@/executor/consts'
import { buildSentinelEndId, buildSentinelStartId } from '@/executor/utils/subflow-utils'
import type { DAG, DAGNode } from '../builder'
const logger = createLogger('LoopConstructor')
/**
 * Creates the virtual sentinel start/end nodes that bracket each reachable
 * loop, so the edge constructor can route entry, continue, and exit edges
 * through them.
 */
export class LoopConstructor {
  /**
   * Inserts a sentinel pair into the DAG for every loop that has at least
   * one reachable member node.
   *
   * @param dag - DAG under construction (sentinels are added to dag.nodes)
   * @param reachableBlocks - Block ids reachable from the trigger
   */
  execute(dag: DAG, reachableBlocks: Set<string>): void {
    for (const [loopId, loopConfig] of dag.loopConfigs) {
      const members = loopConfig.nodes
      if (members.length === 0) continue
      if (!this.hasReachableNodes(members, reachableBlocks)) {
        logger.debug('Skipping sentinel creation for unreachable loop', { loopId })
        continue
      }
      this.createSentinelPair(dag, loopId)
    }
  }

  /** True when at least one member node of the loop is reachable. */
  private hasReachableNodes(loopNodes: string[], reachableBlocks: Set<string>): boolean {
    for (const nodeId of loopNodes) {
      if (reachableBlocks.has(nodeId)) {
        return true
      }
    }
    return false
  }

  /** Builds and inserts the start and end sentinel nodes for one loop. */
  private createSentinelPair(dag: DAG, loopId: string): void {
    const startId = buildSentinelStartId(loopId)
    const endId = buildSentinelEndId(loopId)

    const startNode = this.createSentinelNode({
      id: startId,
      loopId,
      sentinelType: LOOP.SENTINEL.START_TYPE,
      blockType: BlockType.SENTINEL_START,
      name: `Loop Start (${loopId})`,
    })
    const endNode = this.createSentinelNode({
      id: endId,
      loopId,
      sentinelType: LOOP.SENTINEL.END_TYPE,
      blockType: BlockType.SENTINEL_END,
      name: `Loop End (${loopId})`,
    })
    dag.nodes.set(startId, startNode)
    dag.nodes.set(endId, endNode)

    logger.debug('Created sentinel pair for loop', {
      loopId,
      startId,
      endId,
    })
  }

  /** Builds a synthetic DAGNode (with a minimal fake block) for a sentinel. */
  private createSentinelNode(config: {
    id: string
    loopId: string
    sentinelType: SentinelType
    blockType: BlockType
    name: string
  }): DAGNode {
    // Minimal block stub; cast because it lacks most SerializedBlock fields.
    const block = {
      id: config.id,
      enabled: true,
      metadata: {
        id: config.blockType,
        name: config.name,
        loopId: config.loopId,
      },
      config: { params: {} },
    } as any
    return {
      id: config.id,
      block,
      incomingEdges: new Set(),
      outgoingEdges: new Map(),
      metadata: {
        isSentinel: true,
        sentinelType: config.sentinelType,
        loopId: config.loopId,
      },
    }
  }
}

View File

@@ -0,0 +1,181 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isMetadataOnlyBlockType } from '@/executor/consts'
import {
buildBranchNodeId,
calculateBranchCount,
parseDistributionItems,
} from '@/executor/utils/subflow-utils'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
import type { DAG, DAGNode } from '../builder'
const logger = createLogger('NodeConstructor')
/** Resolved branch expansion for one parallel block. */
interface ParallelExpansion {
  parallelId: string
  // Number of branch copies to create per member block
  branchCount: number
  // Per-branch distribution item (undefined for count-based parallels)
  distributionItems: any[]
}
export class NodeConstructor {
execute(
workflow: SerializedWorkflow,
dag: DAG,
reachableBlocks: Set<string>
): { blocksInLoops: Set<string>; blocksInParallels: Set<string> } {
const blocksInLoops = new Set<string>()
const blocksInParallels = new Set<string>()
this.categorizeBlocks(dag, reachableBlocks, blocksInLoops, blocksInParallels)
for (const block of workflow.blocks) {
if (!this.shouldProcessBlock(block, reachableBlocks)) {
continue
}
const parallelId = this.findParallelForBlock(block.id, dag)
if (parallelId) {
this.createParallelBranchNodes(block, parallelId, dag)
} else {
this.createRegularOrLoopNode(block, blocksInLoops, dag)
}
}
return { blocksInLoops, blocksInParallels }
}
private shouldProcessBlock(block: SerializedBlock, reachableBlocks: Set<string>): boolean {
if (!block.enabled) {
return false
}
if (!reachableBlocks.has(block.id)) {
logger.debug('Skipping unreachable block', { blockId: block.id })
return false
}
if (isMetadataOnlyBlockType(block.metadata?.id)) {
logger.debug('Skipping metadata-only block', {
blockId: block.id,
blockType: block.metadata?.id,
})
return false
}
return true
}
private categorizeBlocks(
dag: DAG,
reachableBlocks: Set<string>,
blocksInLoops: Set<string>,
blocksInParallels: Set<string>
): void {
this.categorizeLoopBlocks(dag, reachableBlocks, blocksInLoops)
this.categorizeParallelBlocks(dag, reachableBlocks, blocksInParallels)
}
private categorizeLoopBlocks(
dag: DAG,
reachableBlocks: Set<string>,
blocksInLoops: Set<string>
): void {
for (const [, loopConfig] of dag.loopConfigs) {
for (const nodeId of loopConfig.nodes) {
if (reachableBlocks.has(nodeId)) {
blocksInLoops.add(nodeId)
}
}
}
}
private categorizeParallelBlocks(
dag: DAG,
reachableBlocks: Set<string>,
blocksInParallels: Set<string>
): void {
for (const [, parallelConfig] of dag.parallelConfigs) {
for (const nodeId of parallelConfig.nodes) {
if (reachableBlocks.has(nodeId)) {
blocksInParallels.add(nodeId)
}
}
}
}
private createParallelBranchNodes(block: SerializedBlock, parallelId: string, dag: DAG): void {
const expansion = this.calculateParallelExpansion(parallelId, dag)
logger.debug('Creating parallel branches', {
blockId: block.id,
parallelId: expansion.parallelId,
branchCount: expansion.branchCount,
})
for (let branchIndex = 0; branchIndex < expansion.branchCount; branchIndex++) {
const branchNode = this.createParallelBranchNode(block, branchIndex, expansion)
dag.nodes.set(branchNode.id, branchNode)
}
}
private calculateParallelExpansion(parallelId: string, dag: DAG): ParallelExpansion {
const config = dag.parallelConfigs.get(parallelId)
if (!config) {
throw new Error(`Parallel config not found: ${parallelId}`)
}
const distributionItems = parseDistributionItems(config)
const branchCount = calculateBranchCount(config, distributionItems)
return {
parallelId,
branchCount,
distributionItems,
}
}
/**
 * Builds a single branch node for a parallel expansion. The branch shares
 * the base block's definition (shallow copy) and carries branch metadata,
 * including the distribution item assigned to this branch index.
 */
private createParallelBranchNode(
  baseBlock: SerializedBlock,
  branchIndex: number,
  expansion: ParallelExpansion
): DAGNode {
  const metadata = {
    isParallelBranch: true,
    parallelId: expansion.parallelId,
    branchIndex,
    branchTotal: expansion.branchCount,
    distributionItem: expansion.distributionItems[branchIndex],
  }
  return {
    id: buildBranchNodeId(baseBlock.id, branchIndex),
    block: { ...baseBlock },
    incomingEdges: new Set(),
    outgoingEdges: new Map(),
    metadata,
  }
}
/**
 * Registers a plain DAG node for `block`. If the block sits inside a loop,
 * its metadata records the owning loop id.
 */
private createRegularOrLoopNode(
  block: SerializedBlock,
  blocksInLoops: Set<string>,
  dag: DAG
): void {
  const isLoopNode = blocksInLoops.has(block.id)
  const node: DAGNode = {
    id: block.id,
    block,
    incomingEdges: new Set(),
    outgoingEdges: new Map(),
    metadata: {
      isLoopNode,
      loopId: isLoopNode ? this.findLoopIdForBlock(block.id, dag) : undefined,
    },
  }
  dag.nodes.set(block.id, node)
}
/**
 * Finds the id of the loop whose node list contains `blockId`.
 *
 * @returns the loop id, or undefined when the block is not inside any loop
 */
private findLoopIdForBlock(blockId: string, dag: DAG): string | undefined {
  const match = Array.from(dag.loopConfigs.entries()).find(([, loopConfig]) =>
    loopConfig.nodes.includes(blockId)
  )
  return match?.[0]
}
/**
 * Finds the id of the parallel whose node list contains `blockId`.
 *
 * @returns the parallel id, or null when the block is not inside any parallel
 */
private findParallelForBlock(blockId: string, dag: DAG): string | null {
  for (const [parallelId, parallelConfig] of dag.parallelConfigs.entries()) {
    if (parallelConfig.nodes.includes(blockId)) return parallelId
  }
  return null
}
}

View File

@@ -0,0 +1,152 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isMetadataOnlyBlockType, isTriggerBlockType } from '@/executor/consts'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
const logger = createLogger('PathConstructor')
/**
 * Computes the set of block ids reachable from a workflow's trigger block.
 *
 * Trigger resolution order: an explicitly supplied id (must exist, else
 * throws), then the first enabled trigger-typed block, then the first enabled
 * non-metadata block with no incoming connections. If none is found, all
 * enabled blocks are returned as a conservative fallback.
 */
export class PathConstructor {
  /**
   * Returns the ids of all blocks reachable from the resolved trigger.
   *
   * @param workflow - serialized workflow to analyze
   * @param triggerBlockId - optional explicit trigger block id
   * @throws Error when `triggerBlockId` is provided but not in the workflow
   */
  execute(workflow: SerializedWorkflow, triggerBlockId?: string): Set<string> {
    const resolvedTriggerId = this.findTriggerBlock(workflow, triggerBlockId)
    if (!resolvedTriggerId) {
      logger.warn('No trigger block found, including all enabled blocks as fallback')
      return this.getAllEnabledBlocks(workflow)
    }
    logger.debug('Starting reachability traversal', { triggerBlockId: resolvedTriggerId })
    const adjacency = this.buildAdjacencyMap(workflow)
    const reachable = this.performBFS(resolvedTriggerId, adjacency)
    logger.debug('Reachability analysis complete', {
      triggerBlockId: resolvedTriggerId,
      reachableCount: reachable.size,
      totalBlocks: workflow.blocks.length,
    })
    return reachable
  }

  /**
   * Resolves which block the traversal starts from (see class doc for the
   * resolution order). Returns undefined when no candidate exists.
   */
  private findTriggerBlock(
    workflow: SerializedWorkflow,
    triggerBlockId?: string
  ): string | undefined {
    if (triggerBlockId) {
      const block = workflow.blocks.find((b) => b.id === triggerBlockId)
      if (!block) {
        logger.error('Provided triggerBlockId not found in workflow', {
          triggerBlockId,
          availableBlocks: workflow.blocks.map((b) => ({ id: b.id, type: b.metadata?.id })),
        })
        throw new Error(`Trigger block not found: ${triggerBlockId}`)
      }
      logger.debug('Using explicitly provided trigger block', {
        triggerBlockId,
        blockType: block.metadata?.id,
      })
      return triggerBlockId
    }
    const explicitTrigger = this.findExplicitTrigger(workflow)
    if (explicitTrigger) {
      return explicitTrigger
    }
    const rootBlock = this.findRootBlock(workflow)
    if (rootBlock) {
      return rootBlock
    }
    return undefined
  }

  /** First enabled block whose type is a trigger type, if any. */
  private findExplicitTrigger(workflow: SerializedWorkflow): string | undefined {
    for (const block of workflow.blocks) {
      if (block.enabled && this.isTriggerBlock(block)) {
        logger.debug('Found explicit trigger block', {
          blockId: block.id,
          blockType: block.metadata?.id,
        })
        return block.id
      }
    }
    return undefined
  }

  /**
   * First enabled, non-metadata block with no incoming connections — used as
   * the start point when no explicit trigger exists.
   */
  private findRootBlock(workflow: SerializedWorkflow): string | undefined {
    const hasIncoming = new Set(workflow.connections.map((c) => c.target))
    for (const block of workflow.blocks) {
      if (
        !hasIncoming.has(block.id) &&
        block.enabled &&
        !isMetadataOnlyBlockType(block.metadata?.id)
      ) {
        logger.debug('Found root block (no incoming connections)', {
          blockId: block.id,
          blockType: block.metadata?.id,
        })
        return block.id
      }
    }
    return undefined
  }

  private isTriggerBlock(block: SerializedBlock): boolean {
    return isTriggerBlockType(block.metadata?.id)
  }

  /** Fallback result when no trigger can be resolved. */
  private getAllEnabledBlocks(workflow: SerializedWorkflow): Set<string> {
    return new Set(workflow.blocks.filter((b) => b.enabled).map((b) => b.id))
  }

  /** Builds source -> [targets] adjacency from the workflow's connections. */
  private buildAdjacencyMap(workflow: SerializedWorkflow): Map<string, string[]> {
    const adjacency = new Map<string, string[]>()
    for (const connection of workflow.connections) {
      const neighbors = adjacency.get(connection.source) ?? []
      neighbors.push(connection.target)
      adjacency.set(connection.source, neighbors)
    }
    logger.debug('Built adjacency map', {
      nodeCount: adjacency.size,
      connectionCount: workflow.connections.length,
    })
    return adjacency
  }

  /**
   * Breadth-first traversal from the trigger over the adjacency map.
   * Uses an index cursor over the queue instead of Array#shift(), keeping the
   * traversal O(V + E) rather than quadratic in the number of visited nodes.
   */
  private performBFS(triggerBlockId: string, adjacency: Map<string, string[]>): Set<string> {
    const reachable = new Set<string>([triggerBlockId])
    const queue = [triggerBlockId]
    logger.debug('Starting BFS traversal', {
      triggerBlockId,
      adjacencyMapSize: adjacency.size,
      adjacencyEntries: Array.from(adjacency.entries()).map(([source, targets]) => ({
        source,
        targets,
      })),
    })
    for (let head = 0; head < queue.length; head++) {
      const currentBlockId = queue[head]
      if (currentBlockId === undefined) continue
      const neighbors = adjacency.get(currentBlockId) ?? []
      logger.debug('BFS processing node', {
        currentBlockId,
        neighbors,
        neighborCount: neighbors.length,
      })
      for (const neighborId of neighbors) {
        if (!reachable.has(neighborId)) {
          logger.debug('BFS found new reachable node', {
            from: currentBlockId,
            to: neighborId,
          })
          reachable.add(neighborId)
          queue.push(neighborId)
        }
      }
    }
    logger.debug('BFS traversal complete', {
      triggerBlockId,
      reachableCount: reachable.size,
      reachableBlocks: Array.from(reachable),
    })
    return reachable
  }
}

View File

@@ -0,0 +1,18 @@
/**
 * A directed edge in the execution DAG, stored on the source node and keyed
 * by its target. Handles identify which output port (source side) / input
 * port (target side) the edge attaches to; absent handles mean the default.
 */
export interface DAGEdge {
  target: string
  sourceHandle?: string
  targetHandle?: string
  isActive?: boolean // NOTE(review): not set anywhere in this file — confirm where it is maintained
}
/**
 * Per-node execution metadata attached to DAG nodes. Parallel-branch fields
 * are set by branch expansion; loop fields by loop node creation; sentinel
 * fields mark synthetic subflow start/end markers.
 */
export interface NodeMetadata {
  isParallelBranch?: boolean
  parallelId?: string // Which parallel this branch belongs to
  branchIndex?: number // 0-based index of this branch within the parallel
  branchTotal?: number // total number of branches in the parallel expansion
  distributionItem?: unknown // distribution item assigned to this branch index
  isLoopNode?: boolean
  loopId?: string // id of the loop this node belongs to
  isSentinel?: boolean
  sentinelType?: 'start' | 'end'
}

View File

@@ -0,0 +1,285 @@
import { createLogger } from '@/lib/logs/console/logger'
import { DEFAULTS, EDGE, isSentinelBlockType } from '@/executor/consts'
import type {
BlockHandler,
BlockLog,
ExecutionContext,
NormalizedBlockOutput,
} from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
import type { SubflowType } from '@/stores/workflows/workflow/types'
import type { DAGNode } from '../dag/builder'
import type { VariableResolver } from '../variables/resolver'
import type { ExecutionState } from './state'
import type { ContextExtensions } from './types'
const logger = createLogger('BlockExecutor')
/**
 * Executes a single DAG node: dispatches the block to the first handler that
 * can process it, resolves its inputs, normalizes the handler output, and
 * records logs/state/callbacks on the ExecutionContext.
 *
 * Sentinel blocks (synthetic loop/parallel boundary markers) are executed
 * without producing a BlockLog and without firing start/complete callbacks.
 */
export class BlockExecutor {
  constructor(
    private blockHandlers: BlockHandler[],
    private resolver: VariableResolver,
    private contextExtensions: ContextExtensions,
    // Optional; only used to read loop scopes for iteration labeling.
    private state?: ExecutionState
  ) {}

  /**
   * Runs `block` for `node` and returns its normalized output.
   *
   * Error handling: failures are recorded as `{ error }` in ctx.blockStates.
   * If the node has an outgoing error-port edge, the error output is returned
   * so the error path can continue; otherwise the original error is rethrown.
   *
   * @throws Error when no handler matches the block type, or when execution
   *   fails and the node has no error-port edge
   */
  async execute(
    ctx: ExecutionContext,
    node: DAGNode,
    block: SerializedBlock
  ): Promise<NormalizedBlockOutput> {
    const handler = this.findHandler(block)
    if (!handler) {
      throw new Error(`No handler found for block type: ${block.metadata?.id}`)
    }
    const isSentinel = isSentinelBlockType(block.metadata?.id ?? '')
    let blockLog: BlockLog | undefined
    if (!isSentinel) {
      blockLog = this.createBlockLog(ctx, node.id, block, node)
      ctx.blockLogs.push(blockLog)
      this.callOnBlockStart(ctx, node, block)
    }
    const startTime = Date.now()
    // Declared outside the try so the catch path can report resolved inputs.
    let resolvedInputs: Record<string, any> = {}
    try {
      resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)
      const output = await handler.execute(ctx, block, resolvedInputs)
      // Streaming handlers return a { stream, execution } pair instead of a
      // plain output object.
      const isStreamingExecution =
        output && typeof output === 'object' && 'stream' in output && 'execution' in output
      let normalizedOutput: NormalizedBlockOutput
      if (isStreamingExecution) {
        const streamingExec = output as { stream: ReadableStream; execution: any }
        if (ctx.onStream) {
          try {
            await ctx.onStream(streamingExec)
          } catch (error) {
            // Stream delivery failures are logged but do not fail the block.
            logger.error('Error in onStream callback', { blockId: node.id, error })
          }
        }
        normalizedOutput = this.normalizeOutput(
          streamingExec.execution.output || streamingExec.execution
        )
      } else {
        normalizedOutput = this.normalizeOutput(output)
      }
      const duration = Date.now() - startTime
      if (blockLog) {
        blockLog.endedAt = new Date().toISOString()
        blockLog.durationMs = duration
        blockLog.success = true
        blockLog.output = normalizedOutput
      }
      ctx.blockStates.set(node.id, {
        output: normalizedOutput,
        executed: true,
        executionTime: duration,
      })
      if (!isSentinel) {
        this.callOnBlockComplete(ctx, node, block, resolvedInputs, normalizedOutput, duration)
      }
      return normalizedOutput
    } catch (error) {
      const duration = Date.now() - startTime
      const errorMessage = error instanceof Error ? error.message : String(error)
      if (blockLog) {
        blockLog.endedAt = new Date().toISOString()
        blockLog.durationMs = duration
        blockLog.success = false
        blockLog.error = errorMessage
      }
      const errorOutput: NormalizedBlockOutput = {
        error: errorMessage,
      }
      // Record the error output so downstream blocks can still reference it.
      ctx.blockStates.set(node.id, {
        output: errorOutput,
        executed: true,
        executionTime: duration,
      })
      logger.error('Block execution failed', {
        blockId: node.id,
        blockType: block.metadata?.id,
        error: errorMessage,
      })
      if (!isSentinel) {
        this.callOnBlockComplete(ctx, node, block, resolvedInputs, errorOutput, duration)
      }
      const hasErrorPort = this.hasErrorPortEdge(node)
      if (hasErrorPort) {
        logger.info('Block has error port - returning error output instead of throwing', {
          blockId: node.id,
          error: errorMessage,
        })
        return errorOutput
      }
      throw error
    }
  }

  // First registered handler that claims this block, or undefined.
  private findHandler(block: SerializedBlock): BlockHandler | undefined {
    return this.blockHandlers.find((h) => h.canHandle(block))
  }

  // True when any outgoing edge is attached to the block's error port.
  private hasErrorPortEdge(node: DAGNode): boolean {
    for (const [_, edge] of node.outgoingEdges) {
      if (edge.sourceHandle === EDGE.ERROR) {
        return true
      }
    }
    return false
  }

  /**
   * Builds the log entry for a block execution. For parallel branches and
   * loop bodies, the display name gets an "(iteration N)" suffix and the
   * owning subflow id / iteration index are recorded on the log.
   */
  private createBlockLog(
    ctx: ExecutionContext,
    blockId: string,
    block: SerializedBlock,
    node: DAGNode
  ): BlockLog {
    let blockName = block.metadata?.name || blockId
    let loopId: string | undefined
    let parallelId: string | undefined
    let iterationIndex: number | undefined
    if (node?.metadata) {
      if (node.metadata.branchIndex !== undefined && node.metadata.parallelId) {
        // Parallel branch: the branch index is the iteration label.
        blockName = `${blockName} (iteration ${node.metadata.branchIndex})`
        iterationIndex = node.metadata.branchIndex
        parallelId = node.metadata.parallelId
        logger.debug('Added parallel iteration suffix', {
          blockId,
          parallelId,
          branchIndex: node.metadata.branchIndex,
          blockName,
        })
      } else if (node.metadata.isLoopNode && node.metadata.loopId && this.state) {
        // Loop body: the live loop scope supplies the current iteration.
        loopId = node.metadata.loopId
        const loopScope = this.state.getLoopScope(loopId)
        if (loopScope && loopScope.iteration !== undefined) {
          blockName = `${blockName} (iteration ${loopScope.iteration})`
          iterationIndex = loopScope.iteration
          logger.debug('Added loop iteration suffix', {
            blockId,
            loopId,
            iteration: loopScope.iteration,
            blockName,
          })
        } else {
          logger.warn('Loop scope not found for block', { blockId, loopId })
        }
      }
    }
    return {
      blockId,
      blockName,
      blockType: block.metadata?.id || DEFAULTS.BLOCK_TYPE,
      startedAt: new Date().toISOString(),
      endedAt: '', // filled in when the block finishes
      durationMs: 0,
      success: false,
      loopId,
      parallelId,
      iterationIndex,
    }
  }

  /**
   * Coerces arbitrary handler output into a NormalizedBlockOutput:
   * null/undefined -> {}, non-array objects pass through unchanged, and
   * everything else (primitives, arrays) is wrapped as { result }.
   */
  private normalizeOutput(output: unknown): NormalizedBlockOutput {
    if (output === null || output === undefined) {
      return {}
    }
    if (typeof output === 'object' && !Array.isArray(output)) {
      return output as NormalizedBlockOutput
    }
    return { result: output }
  }

  // Fires the host's onBlockStart callback (if any) with iteration context.
  private callOnBlockStart(ctx: ExecutionContext, node: DAGNode, block: SerializedBlock): void {
    const blockId = node.id
    const blockName = block.metadata?.name || blockId
    const blockType = block.metadata?.id || DEFAULTS.BLOCK_TYPE
    const iterationContext = this.getIterationContext(node)
    if (this.contextExtensions.onBlockStart) {
      this.contextExtensions.onBlockStart(blockId, blockName, blockType, iterationContext)
    }
  }

  // Fires the host's onBlockComplete callback (if any) with the resolved
  // input, final output, timing, and iteration context.
  private callOnBlockComplete(
    ctx: ExecutionContext,
    node: DAGNode,
    block: SerializedBlock,
    input: Record<string, any>,
    output: NormalizedBlockOutput,
    duration: number
  ): void {
    const blockId = node.id
    const blockName = block.metadata?.name || blockId
    const blockType = block.metadata?.id || DEFAULTS.BLOCK_TYPE
    const iterationContext = this.getIterationContext(node)
    if (this.contextExtensions.onBlockComplete) {
      this.contextExtensions.onBlockComplete(
        blockId,
        blockName,
        blockType,
        {
          input,
          output,
          executionTime: duration,
        },
        iterationContext
      )
    }
  }

  /**
   * Derives current/total iteration numbers for callback reporting.
   * Parallel branch metadata takes precedence; otherwise the live loop scope
   * is consulted. Returns undefined outside subflows (or when the scope lacks
   * iteration/maxIterations data).
   */
  private getIterationContext(
    node: DAGNode
  ): { iterationCurrent: number; iterationTotal: number; iterationType: SubflowType } | undefined {
    if (!node?.metadata) return undefined
    if (node.metadata.branchIndex !== undefined && node.metadata.branchTotal) {
      return {
        iterationCurrent: node.metadata.branchIndex,
        iterationTotal: node.metadata.branchTotal,
        iterationType: 'parallel',
      }
    }
    if (node.metadata.isLoopNode && node.metadata.loopId && this.state) {
      const loopScope = this.state.getLoopScope(node.metadata.loopId)
      if (loopScope && loopScope.iteration !== undefined && loopScope.maxIterations) {
        return {
          iterationCurrent: loopScope.iteration,
          iterationTotal: loopScope.maxIterations,
          iterationType: 'loop',
        }
      }
    }
    return undefined
  }
}

View File

@@ -0,0 +1,223 @@
import { createLogger } from '@/lib/logs/console/logger'
import { EDGE } from '@/executor/consts'
import type { NormalizedBlockOutput } from '@/executor/types'
import type { DAG, DAGNode } from '../dag/builder'
import type { DAGEdge } from '../dag/types'
const logger = createLogger('EdgeManager')
/**
 * Maintains edge state over a single DAG execution.
 *
 * As nodes complete, their satisfied incoming edges are removed from target
 * nodes; outgoing edges are activated or not based on the completed node's
 * output (condition/router selections, loop continue/exit routes, error vs
 * success ports). Edges that can never fire are recorded in
 * `deactivatedEdges` and propagated to descendants so join nodes do not wait
 * on dead paths.
 */
export class EdgeManager {
  // Keys (source-target-handle, see createEdgeKey) of edges proven inactive.
  private deactivatedEdges = new Set<string>()
  constructor(private dag: DAG) {}

  /**
   * Processes a completed node's outgoing edges and returns the ids of
   * target nodes that became ready.
   *
   * @param skipBackwardsEdge - when true, loop-continue (backwards) edges are
   *   skipped entirely instead of being evaluated
   */
  processOutgoingEdges(
    node: DAGNode,
    output: NormalizedBlockOutput,
    skipBackwardsEdge = false
  ): string[] {
    const readyNodes: string[] = []
    logger.debug('Processing outgoing edges', {
      nodeId: node.id,
      edgeCount: node.outgoingEdges.size,
      skipBackwardsEdge,
    })
    for (const [edgeId, edge] of node.outgoingEdges) {
      if (skipBackwardsEdge && this.isBackwardsEdge(edge.sourceHandle)) {
        logger.debug('Skipping backwards edge', { edgeId })
        continue
      }
      const shouldActivate = this.shouldActivateEdge(edge, output)
      if (!shouldActivate) {
        // Loop-control edges are never marked dead — presumably because they
        // are re-evaluated on later iterations (confirm with loop
        // orchestrator). Other inactive edges are deactivated transitively.
        const isLoopEdge =
          edge.sourceHandle === EDGE.LOOP_CONTINUE ||
          edge.sourceHandle === EDGE.LOOP_CONTINUE_ALT ||
          edge.sourceHandle === EDGE.LOOP_EXIT
        if (!isLoopEdge) {
          this.deactivateEdgeAndDescendants(node.id, edge.target, edge.sourceHandle)
        }
        logger.debug('Edge not activated', {
          edgeId,
          sourceHandle: edge.sourceHandle,
          from: node.id,
          to: edge.target,
          isLoopEdge,
          deactivatedDescendants: !isLoopEdge,
        })
        continue
      }
      const targetNode = this.dag.nodes.get(edge.target)
      if (!targetNode) {
        logger.warn('Target node not found', { target: edge.target })
        continue
      }
      // Mark this dependency satisfied; the target tracks incoming edges as
      // a set of source node ids.
      targetNode.incomingEdges.delete(node.id)
      logger.debug('Removed incoming edge', {
        from: node.id,
        target: edge.target,
        remainingIncomingEdges: targetNode.incomingEdges.size,
      })
      if (this.isNodeReady(targetNode)) {
        logger.debug('Node ready', { nodeId: targetNode.id })
        readyNodes.push(targetNode.id)
      }
    }
    return readyNodes
  }

  /**
   * A node is ready when all incoming edges are satisfied, or when every
   * remaining incoming edge has been proven inactive (dead path).
   */
  isNodeReady(node: DAGNode): boolean {
    if (node.incomingEdges.size === 0) {
      return true
    }
    const activeIncomingCount = this.countActiveIncomingEdges(node)
    if (activeIncomingCount > 0) {
      logger.debug('Node not ready - waiting for active incoming edges', {
        nodeId: node.id,
        totalIncoming: node.incomingEdges.size,
        activeIncoming: activeIncomingCount,
      })
      return false
    }
    logger.debug('Node ready - all remaining edges are deactivated', {
      nodeId: node.id,
      totalIncoming: node.incomingEdges.size,
    })
    return true
  }

  /**
   * Re-adds a previously satisfied incoming edge on a target node (used when
   * a source will execute again, e.g. across loop iterations).
   */
  restoreIncomingEdge(targetNodeId: string, sourceNodeId: string): void {
    const targetNode = this.dag.nodes.get(targetNodeId)
    if (!targetNode) {
      logger.warn('Cannot restore edge - target node not found', { targetNodeId })
      return
    }
    targetNode.incomingEdges.add(sourceNodeId)
    logger.debug('Restored incoming edge', {
      from: sourceNodeId,
      to: targetNodeId,
    })
  }

  /** Forgets all deactivation decisions (e.g. between iterations/runs). */
  clearDeactivatedEdges(): void {
    this.deactivatedEdges.clear()
  }

  /**
   * Decides whether an edge fires given the source block's output:
   *  - condition-<value> handles fire when output.selectedOption matches
   *  - router-<id> handles fire when output.selectedRoute matches
   *  - loop continue/exit handles fire per output.selectedRoute
   *  - the error port fires only on error; the default source port only on
   *    success; anything else always fires
   */
  private shouldActivateEdge(edge: DAGEdge, output: NormalizedBlockOutput): boolean {
    const handle = edge.sourceHandle
    if (handle?.startsWith(EDGE.CONDITION_PREFIX)) {
      const conditionValue = handle.substring(EDGE.CONDITION_PREFIX.length)
      return output.selectedOption === conditionValue
    }
    if (handle?.startsWith(EDGE.ROUTER_PREFIX)) {
      const routeId = handle.substring(EDGE.ROUTER_PREFIX.length)
      return output.selectedRoute === routeId
    }
    if (handle === EDGE.LOOP_CONTINUE || handle === EDGE.LOOP_CONTINUE_ALT) {
      return output.selectedRoute === EDGE.LOOP_CONTINUE
    }
    if (handle === EDGE.LOOP_EXIT) {
      return output.selectedRoute === EDGE.LOOP_EXIT
    }
    if (handle === EDGE.ERROR && !output.error) {
      return false
    }
    if (handle === EDGE.SOURCE && output.error) {
      return false
    }
    return true
  }

  // Backwards edges are the loop-continue handles (they point back into the
  // loop body).
  private isBackwardsEdge(sourceHandle?: string): boolean {
    return sourceHandle === EDGE.LOOP_CONTINUE || sourceHandle === EDGE.LOOP_CONTINUE_ALT
  }

  /**
   * Marks an edge dead and, if the target now has no other active incoming
   * edges, recursively deactivates the target's outgoing edges too. The
   * deactivatedEdges membership check doubles as cycle protection.
   */
  private deactivateEdgeAndDescendants(
    sourceId: string,
    targetId: string,
    sourceHandle?: string
  ): void {
    const edgeKey = this.createEdgeKey(sourceId, targetId, sourceHandle)
    if (this.deactivatedEdges.has(edgeKey)) {
      return
    }
    this.deactivatedEdges.add(edgeKey)
    const targetNode = this.dag.nodes.get(targetId)
    if (!targetNode) return
    const hasOtherActiveIncoming = this.hasActiveIncomingEdges(targetNode, sourceId)
    if (!hasOtherActiveIncoming) {
      logger.debug('Deactivating descendants of unreachable node', { nodeId: targetId })
      for (const [_, outgoingEdge] of targetNode.outgoingEdges) {
        this.deactivateEdgeAndDescendants(targetId, outgoingEdge.target, outgoingEdge.sourceHandle)
      }
    }
  }

  /**
   * True when `node` still has at least one incoming edge, other than those
   * from `excludeSourceId`, that has not been deactivated.
   */
  private hasActiveIncomingEdges(node: DAGNode, excludeSourceId: string): boolean {
    for (const incomingSourceId of node.incomingEdges) {
      if (incomingSourceId === excludeSourceId) continue
      const incomingNode = this.dag.nodes.get(incomingSourceId)
      if (!incomingNode) continue
      for (const [_, incomingEdge] of incomingNode.outgoingEdges) {
        if (incomingEdge.target === node.id) {
          const incomingEdgeKey = this.createEdgeKey(
            incomingSourceId,
            node.id,
            incomingEdge.sourceHandle
          )
          if (!this.deactivatedEdges.has(incomingEdgeKey)) {
            return true
          }
        }
      }
    }
    return false
  }

  /**
   * Counts incoming sources with at least one non-deactivated edge into
   * `node` (each source contributes at most 1 via the inner break).
   */
  private countActiveIncomingEdges(node: DAGNode): number {
    let count = 0
    for (const sourceId of node.incomingEdges) {
      const sourceNode = this.dag.nodes.get(sourceId)
      if (!sourceNode) continue
      for (const [_, edge] of sourceNode.outgoingEdges) {
        if (edge.target === node.id) {
          const edgeKey = this.createEdgeKey(sourceId, edge.target, edge.sourceHandle)
          if (!this.deactivatedEdges.has(edgeKey)) {
            count++
            break
          }
        }
      }
    }
    return count
  }

  // Stable key identifying an edge instance across the deactivation set.
  private createEdgeKey(sourceId: string, targetId: string, sourceHandle?: string): string {
    return `${sourceId}-${targetId}-${sourceHandle || EDGE.DEFAULT}`
  }
}

View File

@@ -0,0 +1,201 @@
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType } from '@/executor/consts'
import type { ExecutionContext, ExecutionResult, NormalizedBlockOutput } from '@/executor/types'
import type { DAG } from '../dag/builder'
import type { NodeExecutionOrchestrator } from '../orchestrators/node'
import type { EdgeManager } from './edge-manager'
const logger = createLogger('ExecutionEngine')
/**
 * Core run loop for the DAG executor.
 *
 * Maintains a FIFO queue of ready node ids and a set of in-flight node
 * executions. Queued nodes are launched concurrently; completion handling
 * (edge processing + re-queueing) is serialized through a promise-based
 * queue lock so queue and edge state stay consistent.
 */
export class ExecutionEngine {
  /** Node ids that are unblocked and waiting to be started. */
  private readyQueue: string[] = []
  /** In-flight node executions, awaited via Promise.race / Promise.all. */
  private executing = new Set<Promise<void>>()
  /** Promise chain acting as a mutex around completion handling. */
  private queueLock = Promise.resolve()
  /** Output of the node flagged as producing the workflow's final result. */
  private finalOutput: NormalizedBlockOutput = {}
  constructor(
    private dag: DAG,
    private edgeManager: EdgeManager,
    private nodeOrchestrator: NodeExecutionOrchestrator,
    private context: ExecutionContext
  ) {}

  /**
   * Runs the workflow until no ready or in-flight work remains.
   *
   * @param triggerBlockId - node to seed the queue with; falls back to the
   *   workflow's start/starter block when omitted
   * @returns final output plus block logs and timing metadata
   * @throws Error carrying an `executionResult` property describing the
   *   failed run (partial output, logs, metadata)
   */
  async run(triggerBlockId?: string): Promise<ExecutionResult> {
    const startTime = Date.now()
    try {
      this.initializeQueue(triggerBlockId)
      logger.debug('Starting execution loop', {
        initialQueueSize: this.readyQueue.length,
        startNodeId: triggerBlockId,
      })
      while (this.hasWork()) {
        await this.processQueue()
      }
      logger.debug('Execution loop completed', {
        finalOutputKeys: Object.keys(this.finalOutput),
      })
      await this.waitForAllExecutions()
      const endTime = Date.now()
      this.context.metadata.endTime = new Date(endTime).toISOString()
      this.context.metadata.duration = endTime - startTime
      return {
        success: true,
        output: this.finalOutput,
        logs: this.context.blockLogs,
        metadata: this.context.metadata,
      }
    } catch (error) {
      const endTime = Date.now()
      this.context.metadata.endTime = new Date(endTime).toISOString()
      this.context.metadata.duration = endTime - startTime
      const errorMessage = error instanceof Error ? error.message : String(error)
      logger.error('Execution failed', { error: errorMessage })
      const executionResult: ExecutionResult = {
        success: false,
        output: this.finalOutput,
        error: errorMessage,
        logs: this.context.blockLogs,
        metadata: this.context.metadata,
      }
      // Attach the partial result so callers can surface logs/metadata from
      // the failed run.
      const executionError = new Error(errorMessage)
      ;(executionError as any).executionResult = executionResult
      throw executionError
    }
  }

  // True while anything is queued or still executing.
  private hasWork(): boolean {
    return this.readyQueue.length > 0 || this.executing.size > 0
  }

  // Enqueues a node once; duplicate ids are ignored.
  private addToQueue(nodeId: string): void {
    if (!this.readyQueue.includes(nodeId)) {
      this.readyQueue.push(nodeId)
      logger.debug('Added to queue', { nodeId, queueLength: this.readyQueue.length })
    }
  }

  private addMultipleToQueue(nodeIds: string[]): void {
    for (const nodeId of nodeIds) {
      this.addToQueue(nodeId)
    }
  }

  private dequeue(): string | undefined {
    return this.readyQueue.shift()
  }

  /**
   * Registers an in-flight execution so hasWork()/waitFor* can observe it.
   *
   * The set stores the ORIGINAL promise — that is what waitForAnyExecution /
   * waitForAllExecutions await, so its rejection is surfaced there. The
   * derived promise created by .finally() is never awaited; without the
   * trailing .catch() it would reject unobserved and crash the process with
   * an unhandled rejection whenever a node fails.
   */
  private trackExecution(promise: Promise<void>): void {
    this.executing.add(promise)
    void promise
      .finally(() => {
        this.executing.delete(promise)
      })
      .catch(() => {
        // Rejection is handled where the tracked promise itself is awaited.
      })
  }

  // Blocks until at least one in-flight execution settles (rejects with the
  // first failure).
  private async waitForAnyExecution(): Promise<void> {
    if (this.executing.size > 0) {
      await Promise.race(this.executing)
    }
  }

  private async waitForAllExecutions(): Promise<void> {
    await Promise.all(Array.from(this.executing))
  }

  /**
   * Serializes completion handling: each caller queues behind the previous
   * lock holder and releases the lock in `finally`, even on error.
   */
  private async withQueueLock<T>(fn: () => Promise<T> | T): Promise<T> {
    const prevLock = this.queueLock
    let resolveLock: () => void
    this.queueLock = new Promise((resolve) => {
      resolveLock = resolve
    })
    await prevLock
    try {
      return await fn()
    } finally {
      resolveLock!()
    }
  }

  // Seeds the queue with the explicit trigger, or the workflow's start block.
  private initializeQueue(triggerBlockId?: string): void {
    if (triggerBlockId) {
      this.addToQueue(triggerBlockId)
      return
    }
    const startNode = Array.from(this.dag.nodes.values()).find(
      (node) =>
        node.block.metadata?.id === BlockType.START_TRIGGER ||
        node.block.metadata?.id === BlockType.STARTER
    )
    if (startNode) {
      this.addToQueue(startNode.id)
    } else {
      logger.warn('No start node found in DAG')
    }
  }

  /**
   * Drains the ready queue, launching every queued node concurrently, then
   * waits for at least one in-flight execution to settle so newly-ready
   * nodes can be picked up by the outer run loop.
   */
  private async processQueue(): Promise<void> {
    while (this.readyQueue.length > 0) {
      const nodeId = this.dequeue()
      if (!nodeId) continue
      const promise = this.executeNodeAsync(nodeId)
      this.trackExecution(promise)
    }
    if (this.executing.size > 0) {
      await this.waitForAnyExecution()
    }
  }

  /**
   * Executes one node and, for first-time executions, runs its completion
   * handling under the queue lock. Nodes already marked executed skip edge
   * processing to avoid re-triggering downstream paths.
   */
  private async executeNodeAsync(nodeId: string): Promise<void> {
    try {
      const wasAlreadyExecuted = this.context.executedBlocks.has(nodeId)
      const result = await this.nodeOrchestrator.executeNode(nodeId, this.context)
      if (!wasAlreadyExecuted) {
        await this.withQueueLock(async () => {
          await this.handleNodeCompletion(nodeId, result.output, result.isFinalOutput)
        })
      } else {
        logger.debug('Node was already executed, skipping edge processing to avoid loops', {
          nodeId,
        })
      }
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error)
      logger.error('Node execution failed', { nodeId, error: errorMessage })
      throw error
    }
  }

  /**
   * Post-execution bookkeeping: lets the orchestrator observe the completion,
   * captures the final output when flagged, and enqueues nodes whose
   * incoming edges are now satisfied.
   */
  private async handleNodeCompletion(
    nodeId: string,
    output: NormalizedBlockOutput,
    isFinalOutput: boolean
  ): Promise<void> {
    const node = this.dag.nodes.get(nodeId)
    if (!node) {
      logger.error('Node not found during completion', { nodeId })
      return
    }
    await this.nodeOrchestrator.handleNodeCompletion(nodeId, output, this.context)
    if (isFinalOutput) {
      this.finalOutput = output
    }
    const readyNodes = this.edgeManager.processOutgoingEdges(node, output, false)
    this.addMultipleToQueue(readyNodes)
    logger.debug('Node completion handled', {
      nodeId,
      readyNodesCount: readyNodes.length,
      queueSize: this.readyQueue.length,
    })
  }
}

View File

@@ -0,0 +1,186 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockOutput } from '@/blocks/types'
import { createBlockHandlers } from '@/executor/handlers/registry'
import type { ExecutionContext, ExecutionResult } from '@/executor/types'
import {
buildResolutionFromBlock,
buildStartBlockOutput,
resolveExecutorStartBlock,
} from '@/executor/utils/start-block'
import type { SerializedWorkflow } from '@/serializer/types'
import { DAGBuilder } from '../dag/builder'
import { LoopOrchestrator } from '../orchestrators/loop'
import { NodeExecutionOrchestrator } from '../orchestrators/node'
import { ParallelOrchestrator } from '../orchestrators/parallel'
import { VariableResolver } from '../variables/resolver'
import { BlockExecutor } from './block-executor'
import { EdgeManager } from './edge-manager'
import { ExecutionEngine } from './engine'
import { ExecutionState } from './state'
import type { ContextExtensions, WorkflowInput } from './types'
const logger = createLogger('DAGExecutor')
/**
 * Constructor options for DAGExecutor.
 */
export interface DAGExecutorOptions {
  workflow: SerializedWorkflow
  currentBlockStates?: Record<string, BlockOutput> // NOTE(review): stored but not read in this file — confirm intended use
  envVarValues?: Record<string, string> // environment variables exposed to the execution context
  workflowInput?: WorkflowInput // input payload merged into the start block's output
  workflowVariables?: Record<string, unknown>
  contextExtensions?: ContextExtensions // host callbacks and execution flags (stream, selectedOutputs, ...)
}
/**
 * Top-level entry point for the DAG-based workflow executor.
 *
 * Builds the execution DAG from a serialized workflow, wires together the
 * runtime subsystems (shared execution state, variable resolver, loop and
 * parallel orchestrators, block executor, edge manager), and hands control
 * to the ExecutionEngine run loop.
 */
export class DAGExecutor {
  private workflow: SerializedWorkflow
  // NOTE(review): captured from options but never read in this file —
  // confirm whether seeding blockStates from it is still intended.
  private initialBlockStates: Record<string, BlockOutput>
  private environmentVariables: Record<string, string>
  private workflowInput: WorkflowInput
  private workflowVariables: Record<string, unknown>
  private contextExtensions: ContextExtensions
  // NOTE(review): set by cancel() but never checked in this file — verify
  // cancellation is actually honored by the engine.
  private isCancelled = false
  private dagBuilder: DAGBuilder
  constructor(options: DAGExecutorOptions) {
    this.workflow = options.workflow
    this.initialBlockStates = options.currentBlockStates || {}
    this.environmentVariables = options.envVarValues || {}
    this.workflowInput = options.workflowInput || {}
    this.workflowVariables = options.workflowVariables || {}
    this.contextExtensions = options.contextExtensions || {}
    this.dagBuilder = new DAGBuilder()
  }

  /**
   * Builds the DAG, wires all subsystems, and runs the workflow.
   *
   * @param workflowId - id recorded on the execution context
   * @param triggerBlockId - optional block to start from; also used when
   *   seeding the start block's output state
   */
  async execute(workflowId: string, triggerBlockId?: string): Promise<ExecutionResult> {
    const dag = this.dagBuilder.build(this.workflow, triggerBlockId)
    const context = this.createExecutionContext(workflowId, triggerBlockId)
    // Create state with shared references to context's maps/sets for single source of truth
    const state = new ExecutionState(context.blockStates, context.executedBlocks)
    const resolver = new VariableResolver(this.workflow, this.workflowVariables, state)
    const loopOrchestrator = new LoopOrchestrator(dag, state, resolver)
    const parallelOrchestrator = new ParallelOrchestrator(dag, state)
    const allHandlers = createBlockHandlers()
    const blockExecutor = new BlockExecutor(allHandlers, resolver, this.contextExtensions, state)
    const edgeManager = new EdgeManager(dag)
    const nodeOrchestrator = new NodeExecutionOrchestrator(
      dag,
      state,
      blockExecutor,
      loopOrchestrator,
      parallelOrchestrator
    )
    const engine = new ExecutionEngine(dag, edgeManager, nodeOrchestrator, context)
    return await engine.run(triggerBlockId)
  }

  // Marks the execution as cancelled (see NOTE on isCancelled above).
  cancel(): void {
    this.isCancelled = true
  }

  /**
   * Debug-mode continuation stub: always returns a failed result until
   * pause/resume is implemented for the refactored executor.
   */
  async continueExecution(
    pendingBlocks: string[],
    context: ExecutionContext
  ): Promise<ExecutionResult> {
    logger.warn('Debug mode (continueExecution) is not yet implemented in the refactored executor')
    return {
      success: false,
      output: {},
      logs: context.blockLogs || [],
      error: 'Debug mode is not yet supported in the refactored executor',
      metadata: {
        duration: 0,
        startTime: new Date().toISOString(),
      },
    }
  }

  /**
   * Assembles the mutable ExecutionContext shared by all subsystems and
   * seeds the resolved start block's output into blockStates.
   */
  private createExecutionContext(workflowId: string, triggerBlockId?: string): ExecutionContext {
    const context: ExecutionContext = {
      workflowId,
      workspaceId: this.contextExtensions.workspaceId,
      executionId: this.contextExtensions.executionId,
      isDeployedContext: this.contextExtensions.isDeployedContext,
      blockStates: new Map(),
      blockLogs: [],
      metadata: {
        startTime: new Date().toISOString(),
        duration: 0,
      },
      environmentVariables: this.environmentVariables,
      workflowVariables: this.workflowVariables,
      decisions: {
        router: new Map(),
        condition: new Map(),
      },
      loopIterations: new Map(),
      loopItems: new Map(),
      completedLoops: new Set(),
      executedBlocks: new Set(),
      activeExecutionPath: new Set(),
      workflow: this.workflow,
      stream: this.contextExtensions.stream || false,
      selectedOutputs: this.contextExtensions.selectedOutputs || [],
      edges: this.contextExtensions.edges || [],
      onStream: this.contextExtensions.onStream,
      onBlockStart: this.contextExtensions.onBlockStart,
      onBlockComplete: this.contextExtensions.onBlockComplete,
    }
    this.initializeStarterBlock(context, triggerBlockId)
    return context
  }

  /**
   * Resolves the run's start block and pre-populates its output in
   * context.blockStates (marked executed with zero duration) so downstream
   * blocks can reference it.
   *
   * With an explicit triggerBlockId, the block must exist (throws
   * otherwise); a generic resolution is synthesized when no dedicated one is
   * available. Without a trigger id, resolveExecutorStartBlock picks the
   * start block for a manual execution; if none is found the context is left
   * untouched.
   */
  private initializeStarterBlock(context: ExecutionContext, triggerBlockId?: string): void {
    let startResolution: ReturnType<typeof resolveExecutorStartBlock> | null = null
    if (triggerBlockId) {
      const triggerBlock = this.workflow.blocks.find((b) => b.id === triggerBlockId)
      if (!triggerBlock) {
        logger.error('Specified trigger block not found in workflow', {
          triggerBlockId,
        })
        throw new Error(`Trigger block not found: ${triggerBlockId}`)
      }
      startResolution = buildResolutionFromBlock(triggerBlock)
      if (!startResolution) {
        logger.debug('Creating generic resolution for trigger block', {
          triggerBlockId,
          blockType: triggerBlock.metadata?.id,
        })
        startResolution = {
          blockId: triggerBlock.id,
          block: triggerBlock,
          path: 'split_manual' as any,
        }
      }
    } else {
      startResolution = resolveExecutorStartBlock(this.workflow.blocks, {
        execution: 'manual',
        isChildWorkflow: false,
      })
      if (!startResolution?.block) {
        logger.warn('No start block found in workflow')
        return
      }
    }
    const blockOutput = buildStartBlockOutput({
      resolution: startResolution,
      workflowInput: this.workflowInput,
      isDeployedExecution: this.contextExtensions?.isDeployedContext === true,
    })
    context.blockStates.set(startResolution.block.id, {
      output: blockOutput,
      executed: true,
      executionTime: 0,
    })
    logger.debug('Initialized start block', {
      blockId: startResolution.block.id,
      blockType: startResolution.block.metadata?.id,
    })
  }
}

View File

@@ -0,0 +1,98 @@
import type { Edge } from 'reactflow'
import type { BlockLog, BlockState } from '@/executor/types'
/**
 * Identifying and trigger information captured when an execution run starts.
 */
export interface ExecutionMetadata {
  requestId: string
  executionId: string
  workflowId: string
  workspaceId?: string
  userId: string
  triggerType: string // how the run was initiated — TODO confirm the set of values
  triggerBlockId?: string // block the run was started from, when applicable
  useDraftState: boolean // presumably draft vs deployed workflow state — confirm with producer
  startTime: string // timestamp string (presumably ISO-8601 — confirm with producer)
}
/**
 * Optional hooks invoked by the execution host during a run.
 * NOTE(review): the `any` payloads mirror the executor's loosely-typed
 * streaming/output objects — tighten once those shapes stabilize.
 */
export interface ExecutionCallbacks {
  // Invoked when a block produces a streaming execution to be drained.
  onStream?: (streamingExec: any) => Promise<void>
  // Fired before a block executes.
  onBlockStart?: (blockId: string, blockName: string, blockType: string) => Promise<void>
  // Fired after a block finishes (success or failure).
  onBlockComplete?: (
    blockId: string,
    blockName: string,
    blockType: string,
    output: any
  ) => Promise<void>
  // Gives the host a reference to the executor instance (e.g. to cancel it).
  onExecutorCreated?: (executor: any) => void
}
/**
 * JSON-serializable capture of an in-flight execution's state, intended to
 * support pause/resume of a run (see the TODO at the bottom of this file).
 */
export interface SerializableExecutionState {
  blockStates: Record<string, BlockState> // per-block outputs keyed by block id
  executedBlocks: string[]
  blockLogs: BlockLog[]
  // Presumably block id -> selected route/branch for each decision block — confirm
  decisions: {
    router: Record<string, string>
    condition: Record<string, string>
  }
  loopIterations: Record<string, number> // loop id -> current iteration count
  loopItems: Record<string, any>
  completedLoops: string[]
  loopExecutions?: Record<string, any>
  parallelExecutions?: Record<string, any>
  parallelBlockMapping?: Record<string, any>
  activeExecutionPath: string[]
  pendingQueue?: string[] // node ids queued but not yet executed at capture time — TODO confirm
  remainingEdges?: Edge[]
}
/**
 * Immutable capture of everything needed to reconstruct an execution run.
 * Serializes to/from a JSON string via toJSON/fromJSON.
 */
export class ExecutionSnapshot {
  constructor(
    public readonly metadata: ExecutionMetadata,
    public readonly workflow: any,
    public readonly input: any,
    public readonly environmentVariables: Record<string, string>,
    public readonly workflowVariables: Record<string, any>,
    public readonly selectedOutputs: string[] = [],
    public readonly state?: SerializableExecutionState
  ) {}

  /** Serializes the snapshot to a JSON string. */
  toJSON(): string {
    const payload = {
      metadata: this.metadata,
      workflow: this.workflow,
      input: this.input,
      environmentVariables: this.environmentVariables,
      workflowVariables: this.workflowVariables,
      selectedOutputs: this.selectedOutputs,
      state: this.state,
    }
    return JSON.stringify(payload)
  }

  /** Reconstructs a snapshot previously produced by toJSON(). */
  static fromJSON(json: string): ExecutionSnapshot {
    const {
      metadata,
      workflow,
      input,
      environmentVariables,
      workflowVariables,
      selectedOutputs,
      state,
    } = JSON.parse(json)
    return new ExecutionSnapshot(
      metadata,
      workflow,
      input,
      environmentVariables,
      workflowVariables,
      selectedOutputs,
      state
    )
  }
}
// TODO: Implement pause/resume functionality
//
// Future implementation should include:
// 1. executor.pause() - Captures current state mid-execution
// - Serialize ExecutionContext (blockStates, decisions, loops, etc) to state property
// - Save snapshot.toJSON() to database
// 2. executor.resume(snapshot) - Reconstructs execution from saved state
// - Load snapshot from database
// - Restore ExecutionContext from state property
// - Continue execution from pendingQueue
// 3. API endpoints:
// - POST /api/executions/[id]/pause
// - POST /api/executions/[id]/resume
// 4. Database schema:
// - execution_snapshots table with snapshot JSON column

View File

@@ -0,0 +1,70 @@
import type { NormalizedBlockOutput } from '@/executor/types'
/**
 * Per-loop runtime scope tracked by ExecutionState.
 * One instance exists per active loop id.
 */
export interface LoopScope {
  /** Zero-or-one-based current iteration counter — TODO confirm base against orchestrator. */
  iteration: number
  /** Outputs produced by blocks during the current iteration, keyed by block id. */
  currentIterationOutputs: Map<string, NormalizedBlockOutput>
  /** Completed iterations' outputs: one array of block outputs per finished iteration. */
  allIterationOutputs: NormalizedBlockOutput[][]
  /** Upper bound on iterations, when the loop is count-limited. */
  maxIterations?: number
  /** Current item — presumably set for forEach-style loops; verify against caller. */
  item?: any
  /** Full collection being iterated — presumably forEach-style loops only. */
  items?: any[]
  /** Condition expression — presumably evaluated for while-style loops; TODO confirm. */
  condition?: string
  /** When true, the first condition check is skipped (do-while semantics) — NOTE(review): confirm. */
  skipFirstConditionCheck?: boolean
}
/**
 * Per-parallel-subflow runtime scope tracked by ExecutionState.
 */
export interface ParallelScope {
  /** Id of the parallel block this scope belongs to. */
  parallelId: string
  /** Number of branches the parallel fans out into. */
  totalBranches: number
  /** Outputs per branch, keyed by branch index. */
  branchOutputs: Map<number, NormalizedBlockOutput[]>
  /** Count of completed units — presumably nodes, compared against totalExpectedNodes; TODO confirm. */
  completedCount: number
  /** Total node executions expected before the parallel is considered complete. */
  totalExpectedNodes: number
}
/**
 * Mutable runtime state used by the executor: block outputs, the set of
 * executed block ids, and per-loop / per-parallel scopes.
 *
 * The block-state map and executed set are injected via the constructor so
 * they remain shared references with the ExecutionContext (single source of
 * truth); loop and parallel scopes are owned by this instance.
 */
export class ExecutionState {
  readonly loopScopes = new Map<string, LoopScope>()
  readonly parallelScopes = new Map<string, ParallelScope>()

  constructor(
    // Shared references with ExecutionContext for single source of truth
    readonly blockStates: Map<
      string,
      { output: NormalizedBlockOutput; executed: boolean; executionTime: number }
    >,
    readonly executedBlocks: Set<string>
  ) {}

  /** Output recorded for a block, or undefined if none has been stored. */
  getBlockOutput(blockId: string): NormalizedBlockOutput | undefined {
    const entry = this.blockStates.get(blockId)
    return entry === undefined ? undefined : entry.output
  }

  /** Stores a block's output and marks it as executed (executionTime starts at 0). */
  setBlockOutput(blockId: string, output: NormalizedBlockOutput): void {
    this.executedBlocks.add(blockId)
    this.blockStates.set(blockId, { executed: true, executionTime: 0, output })
  }

  /** Whether the given block has already run in this execution. */
  hasExecuted(blockId: string): boolean {
    return this.executedBlocks.has(blockId)
  }

  /** Loop scope for the given loop id, if one has been created. */
  getLoopScope(loopId: string): LoopScope | undefined {
    return this.loopScopes.get(loopId)
  }

  /** Creates or replaces the loop scope for the given loop id. */
  setLoopScope(loopId: string, scope: LoopScope): void {
    this.loopScopes.set(loopId, scope)
  }

  /** Parallel scope for the given parallel id, if one has been created. */
  getParallelScope(parallelId: string): ParallelScope | undefined {
    return this.parallelScopes.get(parallelId)
  }

  /** Creates or replaces the parallel scope for the given parallel id. */
  setParallelScope(parallelId: string, scope: ParallelScope): void {
    this.parallelScopes.set(parallelId, scope)
  }
}

View File

@@ -0,0 +1,38 @@
import type { NormalizedBlockOutput } from '@/executor/types'
import type { SubflowType } from '@/stores/workflows/workflow/types'
/**
 * Optional extensions callers can attach to an execution context:
 * identifiers, streaming configuration, and lifecycle callbacks.
 */
export interface ContextExtensions {
  workspaceId?: string
  executionId?: string
  /** When true, streaming output is requested for selected blocks. */
  stream?: boolean
  /** Block/output ids whose outputs the caller wants surfaced — TODO confirm id format against consumer. */
  selectedOutputs?: string[]
  /** Workflow edges as simple source→target pairs. */
  edges?: Array<{ source: string; target: string }>
  isDeployedContext?: boolean
  /** True when this execution was spawned by a parent execution (e.g. a subflow). */
  isChildExecution?: boolean
  /** Invoked when a streaming execution becomes available. */
  onStream?: (streamingExecution: unknown) => Promise<void>
  /**
   * Invoked before a block executes. `iterationContext` is present when the
   * block runs inside a loop/parallel subflow.
   */
  onBlockStart?: (
    blockId: string,
    blockName: string,
    blockType: string,
    iterationContext?: {
      iterationCurrent: number
      iterationTotal: number
      iterationType: SubflowType
    }
  ) => Promise<void>
  /**
   * Invoked after a block executes, with its resolved input (if captured),
   * normalized output, and wall-clock execution time.
   */
  onBlockComplete?: (
    blockId: string,
    blockName: string,
    blockType: string,
    output: { input?: any; output: NormalizedBlockOutput; executionTime: number },
    iterationContext?: {
      iterationCurrent: number
      iterationTotal: number
      iterationType: SubflowType
    }
  ) => Promise<void>
}
/**
 * Arbitrary key/value input supplied to a workflow execution.
 * Kept as an open index signature because the shape is workflow-defined.
 */
export interface WorkflowInput {
  [key: string]: unknown
}

View File

@@ -219,7 +219,7 @@ describe('AgentBlockHandler', () => {
cost: 0.001,
}
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetProviderFromModel).toHaveBeenCalledWith('gpt-4o')
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
@@ -338,7 +338,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(Promise.all).toHaveBeenCalled()
@@ -421,7 +421,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -466,7 +466,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -531,7 +531,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -563,7 +563,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
})
@@ -601,7 +601,7 @@ describe('AgentBlockHandler', () => {
cost: 0.001,
}
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockTransformBlockTool).toHaveBeenCalledWith(
inputs.tools[0],
@@ -656,7 +656,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
})
@@ -692,7 +692,7 @@ describe('AgentBlockHandler', () => {
'{"type":"object","properties":{"result":{"type":"string"},"score":{"type":"number"}}}',
}
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toEqual({
result: 'Success',
@@ -732,7 +732,7 @@ describe('AgentBlockHandler', () => {
responseFormat: '', // Empty string
}
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toEqual({
content: 'Regular text response',
@@ -775,7 +775,7 @@ describe('AgentBlockHandler', () => {
}
// Should not throw an error, but continue with default behavior
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toEqual({
content: 'Regular text response',
@@ -818,7 +818,7 @@ describe('AgentBlockHandler', () => {
}
// Should not throw an error, but continue with default behavior
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toEqual({
content: 'Regular text response',
@@ -840,7 +840,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
mockFetch.mockRejectedValue(new Error('Provider API Error'))
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Provider API Error'
)
})
@@ -888,7 +888,7 @@ describe('AgentBlockHandler', () => {
mockContext.stream = true
mockContext.selectedOutputs = [mockBlock.id]
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toHaveProperty('stream')
expect(result).toHaveProperty('execution')
@@ -957,7 +957,7 @@ describe('AgentBlockHandler', () => {
mockContext.stream = true
mockContext.selectedOutputs = [mockBlock.id]
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toHaveProperty('stream')
expect(result).toHaveProperty('execution')
@@ -1014,7 +1014,7 @@ describe('AgentBlockHandler', () => {
mockContext.stream = true
mockContext.selectedOutputs = [mockBlock.id]
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toHaveProperty('stream')
expect(result).toHaveProperty('execution')
@@ -1040,7 +1040,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1090,7 +1090,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1129,7 +1129,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1159,7 +1159,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1203,7 +1203,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1243,7 +1243,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1285,7 +1285,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
@@ -1312,7 +1312,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('azure-openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
@@ -1340,7 +1340,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
@@ -1367,7 +1367,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
@@ -1487,7 +1487,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
const result = await handler.execute(mockBlock, inputs, mcpContext)
const result = await handler.execute(mcpContext, mockBlock, inputs)
expect((result as any).content).toBe('I will use MCP tools to help you.')
expect((result as any).toolCalls.count).toBe(2)
@@ -1572,7 +1572,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
const result = await handler.execute(mockBlock, inputs, mcpContext)
const result = await handler.execute(mcpContext, mockBlock, inputs)
expect((result as any).content).toBe('Let me try to use this tool.')
expect((result as any).toolCalls.count).toBe(1)
@@ -1644,7 +1644,7 @@ describe('AgentBlockHandler', () => {
usageControl: tool.usageControl,
}))
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
// Verify that the agent executed successfully with MCP tools
expect(result).toBeDefined()
@@ -1712,7 +1712,7 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, contextWithWorkspace)
await handler.execute(contextWithWorkspace, mockBlock, inputs)
expect(contextWithWorkspace.workspaceId).toBe('test-workspace-456')
})

View File

@@ -1,9 +1,8 @@
import { createLogger } from '@/lib/logs/console/logger'
import { createMcpToolId } from '@/lib/mcp/utils'
import { getBaseUrl } from '@/lib/urls/utils'
import { getAllBlocks } from '@/blocks'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import { AGENT, BlockType, DEFAULTS, HTTP } from '@/executor/consts'
import type {
AgentInputs,
Message,
@@ -11,6 +10,9 @@ import type {
ToolInput,
} from '@/executor/handlers/agent/types'
import type { BlockHandler, ExecutionContext, StreamingExecution } from '@/executor/types'
import { collectBlockData } from '@/executor/utils/block-data'
import { buildAPIUrl, buildAuthHeaders, extractAPIErrorMessage } from '@/executor/utils/http'
import { stringifyJSON } from '@/executor/utils/json'
import { executeProviderRequest } from '@/providers'
import { getApiKey, getProviderFromModel, transformBlockTool } from '@/providers/utils'
import type { SerializedBlock } from '@/serializer/types'
@@ -19,38 +21,6 @@ import { getTool, getToolAsync } from '@/tools/utils'
const logger = createLogger('AgentBlockHandler')
const DEFAULT_MODEL = 'gpt-4o'
const DEFAULT_FUNCTION_TIMEOUT = 5000
const REQUEST_TIMEOUT = 120000
const CUSTOM_TOOL_PREFIX = 'custom_'
/**
* Helper function to collect runtime block outputs and name mappings
* for tag resolution in custom tools and prompts
*/
function collectBlockData(context: ExecutionContext): {
blockData: Record<string, any>
blockNameMapping: Record<string, string>
} {
const blockData: Record<string, any> = {}
const blockNameMapping: Record<string, string> = {}
for (const [id, state] of context.blockStates.entries()) {
if (state.output !== undefined) {
blockData[id] = state.output
const workflowBlock = context.workflow?.blocks?.find((b) => b.id === id)
if (workflowBlock?.metadata?.name) {
// Map both the display name and normalized form
blockNameMapping[workflowBlock.metadata.name] = id
const normalized = workflowBlock.metadata.name.replace(/\s+/g, '').toLowerCase()
blockNameMapping[normalized] = id
}
}
}
return { blockData, blockNameMapping }
}
/**
* Handler for Agent blocks that process LLM requests with optional tools.
*/
@@ -60,37 +30,36 @@ export class AgentBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: AgentInputs,
context: ExecutionContext
inputs: AgentInputs
): Promise<BlockOutput | StreamingExecution> {
logger.info(`Executing agent block: ${block.id}`)
const responseFormat = this.parseResponseFormat(inputs.responseFormat)
const model = inputs.model || DEFAULT_MODEL
const model = inputs.model || AGENT.DEFAULT_MODEL
const providerId = getProviderFromModel(model)
const formattedTools = await this.formatTools(inputs.tools || [], context)
const streamingConfig = this.getStreamingConfig(block, context)
const formattedTools = await this.formatTools(ctx, inputs.tools || [])
const streamingConfig = this.getStreamingConfig(ctx, block)
const messages = this.buildMessages(inputs)
const providerRequest = this.buildProviderRequest({
ctx,
providerId,
model,
messages,
inputs,
formattedTools,
responseFormat,
context,
streaming: streamingConfig.shouldUseStreaming ?? false,
})
return this.executeProviderRequest(providerRequest, block, responseFormat, context)
return this.executeProviderRequest(ctx, providerRequest, block, responseFormat)
}
private parseResponseFormat(responseFormat?: string | object): any {
if (!responseFormat || responseFormat === '') return undefined
// If already an object, process it directly
if (typeof responseFormat === 'object' && responseFormat !== null) {
const formatObj = responseFormat as any
if (!formatObj.schema && !formatObj.name) {
@@ -103,22 +72,16 @@ export class AgentBlockHandler implements BlockHandler {
return responseFormat
}
// Handle string values
if (typeof responseFormat === 'string') {
const trimmedValue = responseFormat.trim()
// Check for variable references like <start.input>
if (trimmedValue.startsWith('<') && trimmedValue.includes('>')) {
logger.info('Response format contains variable reference:', {
value: trimmedValue,
})
// Variable references should have been resolved by the resolver before reaching here
// If we still have a variable reference, it means it couldn't be resolved
// Return undefined to use default behavior (no structured response)
return undefined
}
// Try to parse as JSON
try {
const parsed = JSON.parse(trimmedValue)
@@ -135,13 +98,10 @@ export class AgentBlockHandler implements BlockHandler {
error: error.message,
value: trimmedValue,
})
// Return undefined instead of throwing - this allows execution to continue
// without structured response format
return undefined
}
}
// For any other type, return undefined
logger.warn('Unexpected response format type, using default behavior:', {
type: typeof responseFormat,
value: responseFormat,
@@ -149,7 +109,7 @@ export class AgentBlockHandler implements BlockHandler {
return undefined
}
private async formatTools(inputTools: ToolInput[], context: ExecutionContext): Promise<any[]> {
private async formatTools(ctx: ExecutionContext, inputTools: ToolInput[]): Promise<any[]> {
if (!Array.isArray(inputTools)) return []
const tools = await Promise.all(
@@ -161,12 +121,12 @@ export class AgentBlockHandler implements BlockHandler {
.map(async (tool) => {
try {
if (tool.type === 'custom-tool' && tool.schema) {
return await this.createCustomTool(tool, context)
return await this.createCustomTool(ctx, tool)
}
if (tool.type === 'mcp') {
return await this.createMcpTool(tool, context)
return await this.createMcpTool(ctx, tool)
}
return this.transformBlockTool(tool, context)
return this.transformBlockTool(ctx, tool)
} catch (error) {
logger.error(`[AgentHandler] Error creating tool:`, { tool, error })
return null
@@ -181,14 +141,14 @@ export class AgentBlockHandler implements BlockHandler {
return filteredTools
}
private async createCustomTool(tool: ToolInput, context: ExecutionContext): Promise<any> {
private async createCustomTool(ctx: ExecutionContext, tool: ToolInput): Promise<any> {
const userProvidedParams = tool.params || {}
const { filterSchemaForLLM, mergeToolParameters } = await import('@/tools/params')
const filteredSchema = filterSchemaForLLM(tool.schema.function.parameters, userProvidedParams)
const toolId = `${CUSTOM_TOOL_PREFIX}${tool.title}`
const toolId = `${AGENT.CUSTOM_TOOL_PREFIX}${tool.title}`
const base: any = {
id: toolId,
name: tool.schema.function.name,
@@ -207,27 +167,27 @@ export class AgentBlockHandler implements BlockHandler {
const mergedParams = mergeToolParameters(userProvidedParams, callParams)
// Collect block outputs for tag resolution
const { blockData, blockNameMapping } = collectBlockData(context)
const { blockData, blockNameMapping } = collectBlockData(ctx)
const result = await executeTool(
'function_execute',
{
code: tool.code,
...mergedParams,
timeout: tool.timeout ?? DEFAULT_FUNCTION_TIMEOUT,
envVars: context.environmentVariables || {},
workflowVariables: context.workflowVariables || {},
timeout: tool.timeout ?? AGENT.DEFAULT_FUNCTION_TIMEOUT,
envVars: ctx.environmentVariables || {},
workflowVariables: ctx.workflowVariables || {},
blockData,
blockNameMapping,
isCustomTool: true,
_context: {
workflowId: context.workflowId,
workspaceId: context.workspaceId,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
},
},
false, // skipProxy
false, // skipPostProcess
context // execution context for file processing
false,
false,
ctx
)
if (!result.success) {
@@ -240,7 +200,7 @@ export class AgentBlockHandler implements BlockHandler {
return base
}
private async createMcpTool(tool: ToolInput, context: ExecutionContext): Promise<any> {
private async createMcpTool(ctx: ExecutionContext, tool: ToolInput): Promise<any> {
const { serverId, toolName, ...userProvidedParams } = tool.params || {}
if (!serverId || !toolName) {
@@ -249,31 +209,20 @@ export class AgentBlockHandler implements BlockHandler {
}
try {
const headers: Record<string, string> = { 'Content-Type': 'application/json' }
if (typeof window === 'undefined') {
try {
const { generateInternalToken } = await import('@/lib/auth/internal')
const internalToken = await generateInternalToken()
headers.Authorization = `Bearer ${internalToken}`
} catch (error) {
logger.error(`Failed to generate internal token for MCP tool discovery:`, error)
}
}
const url = new URL('/api/mcp/tools/discover', getBaseUrl())
url.searchParams.set('serverId', serverId)
if (context.workspaceId) {
url.searchParams.set('workspaceId', context.workspaceId)
} else {
if (!ctx.workspaceId) {
throw new Error('workspaceId is required for MCP tool discovery')
}
if (context.workflowId) {
url.searchParams.set('workflowId', context.workflowId)
} else {
if (!ctx.workflowId) {
throw new Error('workflowId is required for internal JWT authentication')
}
const headers = await buildAuthHeaders()
const url = buildAPIUrl('/api/mcp/tools/discover', {
serverId,
workspaceId: ctx.workspaceId,
workflowId: ctx.workflowId,
})
const response = await fetch(url.toString(), {
method: 'GET',
headers,
@@ -310,27 +259,18 @@ export class AgentBlockHandler implements BlockHandler {
executeFunction: async (callParams: Record<string, any>) => {
logger.info(`Executing MCP tool ${toolName} on server ${serverId}`)
const headers: Record<string, string> = { 'Content-Type': 'application/json' }
const headers = await buildAuthHeaders()
const execUrl = buildAPIUrl('/api/mcp/tools/execute')
if (typeof window === 'undefined') {
try {
const { generateInternalToken } = await import('@/lib/auth/internal')
const internalToken = await generateInternalToken()
headers.Authorization = `Bearer ${internalToken}`
} catch (error) {
logger.error(`Failed to generate internal token for MCP tool ${toolName}:`, error)
}
}
const execResponse = await fetch(`${getBaseUrl()}/api/mcp/tools/execute`, {
const execResponse = await fetch(execUrl.toString(), {
method: 'POST',
headers,
body: JSON.stringify({
body: stringifyJSON({
serverId,
toolName,
arguments: callParams,
workspaceId: context.workspaceId,
workflowId: context.workflowId,
workspaceId: ctx.workspaceId,
workflowId: ctx.workflowId,
}),
})
@@ -363,11 +303,11 @@ export class AgentBlockHandler implements BlockHandler {
}
}
private async transformBlockTool(tool: ToolInput, context: ExecutionContext) {
private async transformBlockTool(ctx: ExecutionContext, tool: ToolInput) {
const transformedTool = await transformBlockTool(tool, {
selectedOperation: tool.operation,
getAllBlocks,
getToolAsync: (toolId: string) => getToolAsync(toolId, context.workflowId),
getToolAsync: (toolId: string) => getToolAsync(toolId, ctx.workflowId),
getTool,
})
@@ -377,9 +317,9 @@ export class AgentBlockHandler implements BlockHandler {
return transformedTool
}
private getStreamingConfig(block: SerializedBlock, context: ExecutionContext): StreamingConfig {
private getStreamingConfig(ctx: ExecutionContext, block: SerializedBlock): StreamingConfig {
const isBlockSelectedForOutput =
context.selectedOutputs?.some((outputId) => {
ctx.selectedOutputs?.some((outputId) => {
if (outputId === block.id) return true
const firstUnderscoreIndex = outputId.indexOf('_')
return (
@@ -387,8 +327,8 @@ export class AgentBlockHandler implements BlockHandler {
)
}) ?? false
const hasOutgoingConnections = context.edges?.some((edge) => edge.source === block.id) ?? false
const shouldUseStreaming = Boolean(context.stream) && isBlockSelectedForOutput
const hasOutgoingConnections = ctx.edges?.some((edge) => edge.source === block.id) ?? false
const shouldUseStreaming = Boolean(ctx.stream) && isBlockSelectedForOutput
return { shouldUseStreaming, isBlockSelectedForOutput, hasOutgoingConnections }
}
@@ -492,36 +432,27 @@ export class AgentBlockHandler implements BlockHandler {
}
private buildProviderRequest(config: {
ctx: ExecutionContext
providerId: string
model: string
messages: Message[] | undefined
inputs: AgentInputs
formattedTools: any[]
responseFormat: any
context: ExecutionContext
streaming: boolean
}) {
const {
providerId,
model,
messages,
inputs,
formattedTools,
responseFormat,
context,
streaming,
} = config
const { ctx, providerId, model, messages, inputs, formattedTools, responseFormat, streaming } =
config
const validMessages = this.validateMessages(messages)
// Collect block outputs for runtime resolution
const { blockData, blockNameMapping } = collectBlockData(context)
const { blockData, blockNameMapping } = collectBlockData(ctx)
return {
provider: providerId,
model,
systemPrompt: validMessages ? undefined : inputs.systemPrompt,
context: JSON.stringify(messages),
context: stringifyJSON(messages),
tools: formattedTools,
temperature: inputs.temperature,
maxTokens: inputs.maxTokens,
@@ -529,12 +460,12 @@ export class AgentBlockHandler implements BlockHandler {
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
responseFormat,
workflowId: context.workflowId,
workspaceId: context.workspaceId,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
stream: streaming,
messages,
environmentVariables: context.environmentVariables || {},
workflowVariables: context.workflowVariables || {},
environmentVariables: ctx.environmentVariables || {},
workflowVariables: ctx.workflowVariables || {},
blockData,
blockNameMapping,
reasoningEffort: inputs.reasoningEffort,
@@ -559,10 +490,10 @@ export class AgentBlockHandler implements BlockHandler {
}
private async executeProviderRequest(
ctx: ExecutionContext,
providerRequest: any,
block: SerializedBlock,
responseFormat: any,
context: ExecutionContext
responseFormat: any
): Promise<BlockOutput | StreamingExecution> {
const providerId = providerRequest.provider
const model = providerRequest.model
@@ -573,41 +504,34 @@ export class AgentBlockHandler implements BlockHandler {
if (!isBrowser) {
return this.executeServerSide(
ctx,
providerRequest,
providerId,
model,
block,
responseFormat,
context,
providerStartTime
)
}
return this.executeBrowserSide(
providerRequest,
block,
responseFormat,
context,
providerStartTime
)
return this.executeBrowserSide(ctx, providerRequest, block, responseFormat, providerStartTime)
} catch (error) {
this.handleExecutionError(error, providerStartTime, providerId, model, context, block)
this.handleExecutionError(error, providerStartTime, providerId, model, ctx, block)
throw error
}
}
private async executeServerSide(
ctx: ExecutionContext,
providerRequest: any,
providerId: string,
model: string,
block: SerializedBlock,
responseFormat: any,
context: ExecutionContext,
providerStartTime: number
) {
const finalApiKey = this.getApiKey(providerId, model, providerRequest.apiKey)
// Collect block outputs for runtime resolution
const { blockData, blockNameMapping } = collectBlockData(context)
const { blockData, blockNameMapping } = collectBlockData(ctx)
const response = await executeProviderRequest(providerId, {
model,
@@ -624,42 +548,42 @@ export class AgentBlockHandler implements BlockHandler {
workspaceId: providerRequest.workspaceId,
stream: providerRequest.stream,
messages: 'messages' in providerRequest ? providerRequest.messages : undefined,
environmentVariables: context.environmentVariables || {},
workflowVariables: context.workflowVariables || {},
environmentVariables: ctx.environmentVariables || {},
workflowVariables: ctx.workflowVariables || {},
blockData,
blockNameMapping,
})
this.logExecutionSuccess(providerId, model, context, block, providerStartTime, response)
this.logExecutionSuccess(providerId, model, ctx, block, providerStartTime, response)
return this.processProviderResponse(response, block, responseFormat)
}
private async executeBrowserSide(
ctx: ExecutionContext,
providerRequest: any,
block: SerializedBlock,
responseFormat: any,
context: ExecutionContext,
providerStartTime: number
) {
logger.info('Using HTTP provider request (browser environment)')
const url = new URL('/api/providers', getBaseUrl())
const url = buildAPIUrl('/api/providers')
const response = await fetch(url.toString(), {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(providerRequest),
signal: AbortSignal.timeout(REQUEST_TIMEOUT),
headers: { 'Content-Type': HTTP.CONTENT_TYPE.JSON },
body: stringifyJSON(providerRequest),
signal: AbortSignal.timeout(AGENT.REQUEST_TIMEOUT),
})
if (!response.ok) {
const errorMessage = await this.extractErrorMessage(response)
const errorMessage = await extractAPIErrorMessage(response)
throw new Error(errorMessage)
}
this.logExecutionSuccess(
providerRequest.provider,
providerRequest.model,
context,
ctx,
block,
providerStartTime,
'HTTP response'
@@ -667,13 +591,11 @@ export class AgentBlockHandler implements BlockHandler {
// Check if this is a streaming response
const contentType = response.headers.get('Content-Type')
if (contentType?.includes('text/event-stream')) {
// Handle streaming response
if (contentType?.includes(HTTP.CONTENT_TYPE.EVENT_STREAM)) {
logger.info('Received streaming response')
return this.handleStreamingResponse(response, block)
}
// Handle regular JSON response
const result = await response.json()
return this.processProviderResponse(result, block, responseFormat)
}
@@ -682,24 +604,21 @@ export class AgentBlockHandler implements BlockHandler {
response: Response,
block: SerializedBlock
): Promise<StreamingExecution> {
// Check if we have execution data in headers (from StreamingExecution)
const executionDataHeader = response.headers.get('X-Execution-Data')
if (executionDataHeader) {
// Parse execution data from header
try {
const executionData = JSON.parse(executionDataHeader)
// Create StreamingExecution object
return {
stream: response.body!,
execution: {
success: executionData.success,
output: executionData.output || {},
error: executionData.error,
logs: [], // Logs are stripped from headers, will be populated by executor
logs: [],
metadata: executionData.metadata || {
duration: 0,
duration: DEFAULTS.EXECUTION_TIME,
startTime: new Date().toISOString(),
},
isStreaming: true,
@@ -710,11 +629,9 @@ export class AgentBlockHandler implements BlockHandler {
}
} catch (error) {
logger.error('Failed to parse execution data from header:', error)
// Fall back to minimal streaming execution
}
}
// Fallback for plain ReadableStream or when header parsing fails
return this.createMinimalStreamingExecution(response.body!)
}
@@ -732,23 +649,10 @@ export class AgentBlockHandler implements BlockHandler {
}
}
private async extractErrorMessage(response: Response): Promise<string> {
let errorMessage = `Provider API request failed with status ${response.status}`
try {
const errorData = await response.json()
if (errorData.error) {
errorMessage = errorData.error
}
} catch (_e) {
// Use default message if JSON parsing fails
}
return errorMessage
}
private logExecutionSuccess(
provider: string,
model: string,
context: ExecutionContext,
ctx: ExecutionContext,
block: SerializedBlock,
startTime: number,
response: any
@@ -764,7 +668,7 @@ export class AgentBlockHandler implements BlockHandler {
logger.info('Provider request completed successfully', {
provider,
model,
workflowId: context.workflowId,
workflowId: ctx.workflowId,
blockId: block.id,
executionTime,
responseType,
@@ -776,7 +680,7 @@ export class AgentBlockHandler implements BlockHandler {
startTime: number,
provider: string,
model: string,
context: ExecutionContext,
ctx: ExecutionContext,
block: SerializedBlock
) {
const executionTime = Date.now() - startTime
@@ -786,14 +690,14 @@ export class AgentBlockHandler implements BlockHandler {
executionTime,
provider,
model,
workflowId: context.workflowId,
workflowId: ctx.workflowId,
blockId: block.id,
})
if (!(error instanceof Error)) return
logger.error('Provider request error details', {
workflowId: context.workflowId,
workflowId: ctx.workflowId,
blockId: block.id,
errorName: error.name,
errorMessage: error.message,
@@ -862,7 +766,7 @@ export class AgentBlockHandler implements BlockHandler {
output: {},
logs: [],
metadata: {
duration: 0,
duration: DEFAULTS.EXECUTION_TIME,
startTime: new Date().toISOString(),
},
},
@@ -892,7 +796,6 @@ export class AgentBlockHandler implements BlockHandler {
error: error instanceof Error ? error.message : 'Unknown error',
})
// LLM did not adhere to structured response format
logger.error('LLM did not adhere to structured response format:', {
content: content.substring(0, 200) + (content.length > 200 ? '...' : ''),
responseFormat: responseFormat,
@@ -914,12 +817,21 @@ export class AgentBlockHandler implements BlockHandler {
}
}
private createResponseMetadata(result: any) {
private createResponseMetadata(result: {
tokens?: { prompt?: number; completion?: number; total?: number }
toolCalls?: Array<any>
timing?: any
cost?: any
}) {
return {
tokens: result.tokens || { prompt: 0, completion: 0, total: 0 },
tokens: result.tokens || {
prompt: DEFAULTS.TOKENS.PROMPT,
completion: DEFAULTS.TOKENS.COMPLETION,
total: DEFAULTS.TOKENS.TOTAL,
},
toolCalls: {
list: result.toolCalls ? result.toolCalls.map(this.formatToolCall.bind(this)) : [],
count: result.toolCalls?.length || 0,
list: result.toolCalls?.map(this.formatToolCall.bind(this)) || [],
count: result.toolCalls?.length || DEFAULTS.EXECUTION_TIME,
},
providerTiming: result.timing,
cost: result.cost,
@@ -941,6 +853,8 @@ export class AgentBlockHandler implements BlockHandler {
}
private stripCustomToolPrefix(name: string): string {
return name.startsWith('custom_') ? name.replace('custom_', '') : name
return name.startsWith(AGENT.CUSTOM_TOOL_PREFIX)
? name.replace(AGENT.CUSTOM_TOOL_PREFIX, '')
: name
}
}

View File

@@ -97,7 +97,7 @@ describe('ApiBlockHandler', () => {
mockExecuteTool.mockResolvedValue({ success: true, output: { data: 'Success' } })
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetTool).toHaveBeenCalledWith('http_request')
expect(mockExecuteTool).toHaveBeenCalledWith(
@@ -122,7 +122,7 @@ describe('ApiBlockHandler', () => {
const expectedOutput = { data: null, status: 200, headers: {} }
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetTool).toHaveBeenCalledWith('http_request')
expect(mockExecuteTool).not.toHaveBeenCalled()
@@ -132,7 +132,7 @@ describe('ApiBlockHandler', () => {
it('should throw error for invalid URL format (no protocol)', async () => {
const inputs = { url: 'example.com/api' }
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Invalid URL: "example.com/api" - URL must include protocol (try "https://example.com/api")'
)
expect(mockExecuteTool).not.toHaveBeenCalled()
@@ -141,7 +141,7 @@ describe('ApiBlockHandler', () => {
it('should throw error for generally invalid URL format', async () => {
const inputs = { url: 'htp:/invalid-url' }
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Invalid URL: "htp:\/invalid-url" - URL must include protocol/
)
expect(mockExecuteTool).not.toHaveBeenCalled()
@@ -154,7 +154,7 @@ describe('ApiBlockHandler', () => {
}
const expectedParsedBody = { key: 'value', nested: { num: 1 } }
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteTool).toHaveBeenCalledWith(
'http_request',
@@ -171,7 +171,7 @@ describe('ApiBlockHandler', () => {
body: 'This is plain text',
}
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteTool).toHaveBeenCalledWith(
'http_request',
@@ -188,7 +188,7 @@ describe('ApiBlockHandler', () => {
body: null,
}
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteTool).toHaveBeenCalledWith(
'http_request',
@@ -211,7 +211,7 @@ describe('ApiBlockHandler', () => {
error: 'Resource not found',
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'HTTP Request failed: URL: https://example.com/notfound | Method: GET | Error: Resource not found | Status: 404 | Status text: Not Found - The requested resource was not found'
)
expect(mockExecuteTool).toHaveBeenCalled()
@@ -223,7 +223,7 @@ describe('ApiBlockHandler', () => {
// Override mock to return undefined for this test
mockGetTool.mockImplementation(() => undefined)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Tool not found: http_request'
)
expect(mockExecuteTool).not.toHaveBeenCalled()
@@ -236,7 +236,7 @@ describe('ApiBlockHandler', () => {
error: 'Request failed due to CORS policy',
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/CORS policy prevented the request, try using a proxy or server-side request/
)
})
@@ -245,7 +245,7 @@ describe('ApiBlockHandler', () => {
const inputs = { url: 'https://unreachable.local' }
mockExecuteTool.mockResolvedValue({ success: false, error: 'Failed to fetch' })
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/Network error, check if the URL is accessible and if you have internet connectivity/
)
})

View File

@@ -1,6 +1,7 @@
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType } from '@/executor/consts'
import { BlockType, HTTP } from '@/executor/consts'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import { stringifyJSON } from '@/executor/utils/json'
import type { SerializedBlock } from '@/serializer/types'
import { executeTool } from '@/tools'
import { getTool } from '@/tools/utils'
@@ -16,23 +17,20 @@ export class ApiBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<any> {
const tool = getTool(block.config.tool)
if (!tool) {
throw new Error(`Tool not found: ${block.config.tool}`)
}
// Early return with empty success response if URL is not provided or empty
if (tool.name?.includes('HTTP') && (!inputs.url || inputs.url.trim() === '')) {
return { data: null, status: 200, headers: {} }
return { data: null, status: HTTP.STATUS.OK, headers: {} }
}
// Pre-validate common HTTP request issues to provide better error messages
if (tool.name?.includes('HTTP') && inputs.url) {
// Strip any surrounding quotes that might have been added during resolution
let urlToValidate = inputs.url
if (typeof urlToValidate === 'string') {
if (
@@ -40,19 +38,16 @@ export class ApiBlockHandler implements BlockHandler {
(urlToValidate.startsWith("'") && urlToValidate.endsWith("'"))
) {
urlToValidate = urlToValidate.slice(1, -1)
// Update the input with unquoted URL
inputs.url = urlToValidate
}
}
// Check for missing protocol
if (!urlToValidate.match(/^https?:\/\//i)) {
throw new Error(
`Invalid URL: "${urlToValidate}" - URL must include protocol (try "https://${urlToValidate}")`
)
}
// Detect other common URL issues
try {
new URL(urlToValidate)
} catch (e: any) {
@@ -63,34 +58,28 @@ export class ApiBlockHandler implements BlockHandler {
try {
const processedInputs = { ...inputs }
// Handle body specifically to ensure it's properly processed for API requests
if (processedInputs.body !== undefined) {
// If body is a string that looks like JSON, parse it
if (typeof processedInputs.body === 'string') {
try {
// Trim whitespace before checking for JSON pattern
const trimmedBody = processedInputs.body.trim()
if (trimmedBody.startsWith('{') || trimmedBody.startsWith('[')) {
processedInputs.body = JSON.parse(trimmedBody)
logger.info(
'[ApiBlockHandler] Parsed JSON body:',
JSON.stringify(processedInputs.body, null, 2)
stringifyJSON(processedInputs.body)
)
}
} catch (e) {
logger.info('[ApiBlockHandler] Failed to parse body as JSON, using as string:', e)
// Keep as string if parsing fails
}
} else if (processedInputs.body === null) {
// Convert null to undefined for consistency with API expectations
processedInputs.body = undefined
}
}
// Ensure the final processed body is logged
logger.info(
'[ApiBlockHandler] Final processed request body:',
JSON.stringify(processedInputs.body, null, 2)
stringifyJSON(processedInputs.body)
)
const result = await executeTool(
@@ -98,36 +87,33 @@ export class ApiBlockHandler implements BlockHandler {
{
...processedInputs,
_context: {
workflowId: context.workflowId,
workspaceId: context.workspaceId,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
},
},
false, // skipProxy
false, // skipPostProcess
context // execution context for file processing
false,
false,
ctx
)
if (!result.success) {
const errorDetails = []
// Add request details to error message
if (inputs.url) errorDetails.push(`URL: ${inputs.url}`)
if (inputs.method) errorDetails.push(`Method: ${inputs.method}`)
// Add response details
if (result.error) errorDetails.push(`Error: ${result.error}`)
if (result.output?.status) errorDetails.push(`Status: ${result.output.status}`)
if (result.output?.statusText) errorDetails.push(`Status text: ${result.output.statusText}`)
// Add specific suggestions for common error codes
let suggestion = ''
if (result.output?.status === 403) {
if (result.output?.status === HTTP.STATUS.FORBIDDEN) {
suggestion = ' - This may be due to CORS restrictions or authorization issues'
} else if (result.output?.status === 404) {
} else if (result.output?.status === HTTP.STATUS.NOT_FOUND) {
suggestion = ' - The requested resource was not found'
} else if (result.output?.status === 429) {
} else if (result.output?.status === HTTP.STATUS.TOO_MANY_REQUESTS) {
suggestion = ' - Too many requests, you may need to implement rate limiting'
} else if (result.output?.status >= 500) {
} else if (result.output?.status >= HTTP.STATUS.SERVER_ERROR) {
suggestion = ' - Server error, the target server is experiencing issues'
} else if (result.error?.includes('CORS')) {
suggestion =
@@ -142,10 +128,8 @@ export class ApiBlockHandler implements BlockHandler {
? `HTTP Request failed: ${errorDetails.join(' | ')}${suggestion}`
: `API request to ${tool.name || block.config.tool} failed with no error message`
// Create a detailed error object with formatted message
const error = new Error(errorMessage)
// Add additional properties for debugging
Object.assign(error, {
toolId: block.config.tool,
toolName: tool.name || 'Unknown tool',
@@ -165,17 +149,13 @@ export class ApiBlockHandler implements BlockHandler {
return result.output
} catch (error: any) {
// Ensure we have a meaningful error message
if (!error.message || error.message === 'undefined (undefined)') {
// Construct a detailed error message with available information
let errorMessage = `API request to ${tool.name || block.config.tool} failed`
// Add details if available
if (inputs.url) errorMessage += `: ${inputs.url}`
if (error.status) errorMessage += ` (Status: ${error.status})`
if (error.statusText) errorMessage += ` - ${error.statusText}`
// If we still have no details, give a generic but helpful message
if (errorMessage === `API request to ${tool.name || block.config.tool} failed`) {
errorMessage += ` - ${block.metadata?.name || 'Unknown error'}`
}
@@ -183,12 +163,10 @@ export class ApiBlockHandler implements BlockHandler {
error.message = errorMessage
}
// Add additional context to the error
if (typeof error === 'object' && error !== null) {
if (!error.toolId) error.toolId = block.config.tool
if (!error.blockName) error.blockName = block.metadata?.name || 'Unnamed Block'
// Add request details if missing
if (inputs && !error.request) {
error.request = {
url: inputs.url,

View File

@@ -1,26 +1,21 @@
import '@/executor/__test-utils__/mock-dependencies'
import { beforeEach, describe, expect, it, type Mocked, type MockedClass, vi } from 'vitest'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { BlockType } from '@/executor/consts'
import { ConditionBlockHandler } from '@/executor/handlers/condition/condition-handler'
import { PathTracker } from '@/executor/path/path'
import { InputResolver } from '@/executor/resolver/resolver'
import type { BlockState, ExecutionContext } from '@/executor/types'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
const MockPathTracker = PathTracker as MockedClass<typeof PathTracker>
const MockInputResolver = InputResolver as MockedClass<typeof InputResolver>
describe('ConditionBlockHandler', () => {
let handler: ConditionBlockHandler
let mockBlock: SerializedBlock
let mockContext: ExecutionContext
let mockPathTracker: Mocked<PathTracker>
let mockResolver: Mocked<InputResolver>
let mockWorkflow: Partial<SerializedWorkflow>
let mockSourceBlock: SerializedBlock
let mockTargetBlock1: SerializedBlock
let mockTargetBlock2: SerializedBlock
let mockResolver: any
let mockPathTracker: any
beforeEach(() => {
// Define blocks first
@@ -79,16 +74,13 @@ describe('ConditionBlockHandler', () => {
],
}
mockPathTracker = new MockPathTracker(mockWorkflow as SerializedWorkflow) as Mocked<PathTracker>
mockResolver = new MockInputResolver(
mockWorkflow as SerializedWorkflow,
{}
) as Mocked<InputResolver>
mockResolver = {
resolveVariableReferences: vi.fn((expr) => expr),
resolveBlockReferences: vi.fn((expr) => expr),
resolveEnvVariables: vi.fn((expr) => expr),
}
// Ensure the methods exist as mock functions on the instance
mockResolver.resolveBlockReferences = vi.fn()
mockResolver.resolveVariableReferences = vi.fn()
mockResolver.resolveEnvVariables = vi.fn()
mockPathTracker = {}
handler = new ConditionBlockHandler(mockPathTracker, mockResolver)
@@ -147,6 +139,7 @@ describe('ConditionBlockHandler', () => {
blockTitle: 'Target Block 1',
},
selectedConditionId: 'cond1',
selectedOption: 'cond1',
}
// Mock the full resolution pipeline
@@ -154,7 +147,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('context.value > 5')
mockResolver.resolveEnvVariables.mockReturnValue('context.value > 5')
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockResolver.resolveVariableReferences).toHaveBeenCalledWith(
'context.value > 5',
@@ -187,6 +180,7 @@ describe('ConditionBlockHandler', () => {
blockTitle: 'Target Block 2',
},
selectedConditionId: 'else1',
selectedOption: 'else1',
}
// Mock the full resolution pipeline
@@ -194,7 +188,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('context.value < 0')
mockResolver.resolveEnvVariables.mockReturnValue('context.value < 0')
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockResolver.resolveVariableReferences).toHaveBeenCalledWith(
'context.value < 0',
@@ -213,7 +207,7 @@ describe('ConditionBlockHandler', () => {
it('should handle invalid conditions JSON format', async () => {
const inputs = { conditions: '{ "invalid json ' }
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Invalid conditions format: Unterminated string.*/
)
})
@@ -230,7 +224,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('10 > 5')
mockResolver.resolveEnvVariables.mockReturnValue('10 > 5')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockResolver.resolveVariableReferences).toHaveBeenCalledWith(
'{{source-block-1.value}} > 5',
@@ -257,7 +251,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('"john" !== null')
mockResolver.resolveEnvVariables.mockReturnValue('"john" !== null')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockResolver.resolveVariableReferences).toHaveBeenCalledWith(
'<variable.userName> !== null',
@@ -284,7 +278,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('{{POOP}} === "hi"')
mockResolver.resolveEnvVariables.mockReturnValue('"hi" === "hi"')
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockResolver.resolveVariableReferences).toHaveBeenCalledWith(
'{{POOP}} === "hi"',
@@ -312,7 +306,7 @@ describe('ConditionBlockHandler', () => {
throw resolutionError
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Failed to resolve references in condition: Could not resolve reference: invalid-ref'
)
})
@@ -331,19 +325,24 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('context.nonExistentProperty.doSomething()')
mockResolver.resolveEnvVariables.mockReturnValue('context.nonExistentProperty.doSomething()')
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Evaluation error in condition "if": Evaluation error in condition: Cannot read properties of undefined \(reading 'doSomething'\)\. \(Resolved: context\.nonExistentProperty\.doSomething\(\)\)$/
)
})
it('should throw error if source block output is missing', async () => {
it('should handle missing source block output gracefully', async () => {
const conditions = [{ id: 'cond1', title: 'if', value: 'true' }]
const inputs = { conditions: JSON.stringify(conditions) }
mockContext.blockStates.delete(mockSourceBlock.id)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
`No output found for source block ${mockSourceBlock.id}`
)
mockResolver.resolveVariableReferences.mockReturnValue('true')
mockResolver.resolveBlockReferences.mockReturnValue('true')
mockResolver.resolveEnvVariables.mockReturnValue('true')
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(result).toHaveProperty('conditionResult', true)
expect(result).toHaveProperty('selectedConditionId', 'cond1')
})
it('should throw error if target block is missing', async () => {
@@ -357,7 +356,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('true')
mockResolver.resolveEnvVariables.mockReturnValue('true')
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
`Target block ${mockTargetBlock1.id} not found`
)
})
@@ -389,7 +388,7 @@ describe('ConditionBlockHandler', () => {
.mockReturnValueOnce('false')
.mockReturnValueOnce('context.value === 99')
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
`No matching path found for condition block "${mockBlock.metadata?.name}", and no 'else' block exists.`
)
})
@@ -408,7 +407,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveBlockReferences.mockReturnValue('context.item === "apple"')
mockResolver.resolveEnvVariables.mockReturnValue('context.item === "apple"')
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('cond1')
expect((result as any).selectedConditionId).toBe('cond1')

View File

@@ -1,8 +1,6 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import type { PathTracker } from '@/executor/path/path'
import type { InputResolver } from '@/executor/resolver/resolver'
import { BlockType, CONDITION, DEFAULTS, EDGE } from '@/executor/consts'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
@@ -13,25 +11,26 @@ const logger = createLogger('ConditionBlockHandler')
* Returns true if condition is met, false otherwise
*/
export async function evaluateConditionExpression(
ctx: ExecutionContext,
conditionExpression: string,
context: ExecutionContext,
block: SerializedBlock,
resolver: InputResolver,
resolver: any,
providedEvalContext?: Record<string, any>
): Promise<boolean> {
// Build evaluation context - use provided context or just loop context
const evalContext = providedEvalContext || {
// Add loop context if applicable
...(context.loopItems.get(block.id) || {}),
...(ctx.loopItems.get(block.id) || {}),
}
let resolvedConditionValue = conditionExpression
try {
// Use full resolution pipeline: variables -> block references -> env vars
const resolvedVars = resolver.resolveVariableReferences(conditionExpression, block)
const resolvedRefs = resolver.resolveBlockReferences(resolvedVars, context, block)
resolvedConditionValue = resolver.resolveEnvVariables(resolvedRefs)
logger.info(`Resolved condition: from "${conditionExpression}" to "${resolvedConditionValue}"`)
if (resolver) {
const resolvedVars = resolver.resolveVariableReferences(conditionExpression, block)
const resolvedRefs = resolver.resolveBlockReferences(resolvedVars, ctx, block)
resolvedConditionValue = resolver.resolveEnvVariables(resolvedRefs)
logger.info(
`Resolved condition: from "${conditionExpression}" to "${resolvedConditionValue}"`
)
}
} catch (resolveError: any) {
logger.error(`Failed to resolve references in condition: ${resolveError.message}`, {
conditionExpression,
@@ -40,10 +39,8 @@ export async function evaluateConditionExpression(
throw new Error(`Failed to resolve references in condition: ${resolveError.message}`)
}
// Evaluate the RESOLVED condition string
try {
logger.info(`Evaluating resolved condition: "${resolvedConditionValue}"`, { evalContext })
// IMPORTANT: The resolved value (e.g., "some string".length > 0) IS the code to run
const conditionMet = new Function(
'context',
`with(context) { return ${resolvedConditionValue} }`
@@ -67,13 +64,9 @@ export async function evaluateConditionExpression(
* Handler for Condition blocks that evaluate expressions to determine execution paths.
*/
export class ConditionBlockHandler implements BlockHandler {
/**
* @param pathTracker - Utility for tracking execution paths
* @param resolver - Utility for resolving inputs
*/
constructor(
private pathTracker: PathTracker,
private resolver: InputResolver
private pathTracker?: any,
private resolver?: any
) {}
canHandle(block: SerializedBlock): boolean {
@@ -81,103 +74,123 @@ export class ConditionBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<BlockOutput> {
logger.info(`Executing condition block: ${block.id}`, {
// Log raw inputs before parsing
rawConditionsInput: inputs.conditions,
})
// 1. Parse the conditions JSON string FIRST
let conditions: Array<{ id: string; title: string; value: string }> = []
try {
conditions = Array.isArray(inputs.conditions)
? inputs.conditions
: JSON.parse(inputs.conditions || '[]')
logger.info('Parsed conditions:', JSON.stringify(conditions, null, 2))
} catch (error: any) {
logger.error('Failed to parse conditions JSON:', {
conditionsInput: inputs.conditions,
error,
})
throw new Error(`Invalid conditions format: ${error.message}`)
}
const conditions = this.parseConditions(inputs.conditions)
// Find source block for the condition (used for context if needed, maybe remove later)
const sourceBlockId = context.workflow?.connections.find(
(conn) => conn.target === block.id
)?.source
const sourceBlockId = ctx.workflow?.connections.find((conn) => conn.target === block.id)?.source
const evalContext = this.buildEvaluationContext(ctx, block.id, sourceBlockId)
const sourceOutput = sourceBlockId ? ctx.blockStates.get(sourceBlockId)?.output : null
if (!sourceBlockId) {
throw new Error(`No source block found for condition block ${block.id}`)
}
const outgoingConnections = ctx.workflow?.connections.filter((conn) => conn.source === block.id)
const sourceOutput = context.blockStates.get(sourceBlockId)?.output
if (!sourceOutput) {
throw new Error(`No output found for source block ${sourceBlockId}`)
}
// Get source block to derive a dynamic key (maybe remove later)
const sourceBlock = context.workflow?.blocks.find((b) => b.id === sourceBlockId)
if (!sourceBlock) {
throw new Error(`Source block ${sourceBlockId} not found`)
}
// Build evaluation context (primarily for potential 'context' object in Function)
// We might not strictly need sourceKey here if references handle everything
const evalContext = {
...(typeof sourceOutput === 'object' && sourceOutput !== null ? sourceOutput : {}),
// Add other relevant context if needed, like loop variables
...(context.loopItems.get(block.id) || {}), // Example: Add loop context if applicable
}
logger.info('Base eval context:', JSON.stringify(evalContext, null, 2))
// Get outgoing connections
const outgoingConnections = context.workflow?.connections.filter(
(conn) => conn.source === block.id
const { selectedConnection, selectedCondition } = await this.evaluateConditions(
conditions,
outgoingConnections || [],
evalContext,
ctx,
block
)
// Evaluate conditions in order (if, else if, else)
let selectedConnection: { target: string; sourceHandle?: string } | null = null
let selectedCondition: { id: string; title: string; value: string } | null = null
const targetBlock = ctx.workflow?.blocks.find((b) => b.id === selectedConnection?.target)
if (!targetBlock) {
throw new Error(`Target block ${selectedConnection?.target} not found`)
}
for (const condition of conditions) {
// Skip 'else' conditions that have no value to evaluate
if (condition.title === 'else') {
const connection = outgoingConnections?.find(
(conn) => conn.sourceHandle === `condition-${condition.id}`
) as { target: string; sourceHandle?: string } | undefined
if (connection) {
selectedConnection = connection
selectedCondition = condition
break // 'else' is always the last path if reached
logger.info(
`Condition block ${block.id} selected path: ${selectedCondition.title} (${selectedCondition.id}) -> ${targetBlock.metadata?.name || targetBlock.id}`
)
const decisionKey = ctx.currentVirtualBlockId || block.id
ctx.decisions.condition.set(decisionKey, selectedCondition.id)
return {
...((sourceOutput as any) || {}),
conditionResult: true,
selectedPath: {
blockId: targetBlock.id,
blockType: targetBlock.metadata?.id || DEFAULTS.BLOCK_TYPE,
blockTitle: targetBlock.metadata?.name || DEFAULTS.BLOCK_TITLE,
},
selectedOption: selectedCondition.id,
selectedConditionId: selectedCondition.id,
}
}
private parseConditions(input: any): Array<{ id: string; title: string; value: string }> {
try {
const conditions = Array.isArray(input) ? input : JSON.parse(input || '[]')
logger.info('Parsed conditions:', conditions)
return conditions
} catch (error: any) {
logger.error('Failed to parse conditions:', { input, error })
throw new Error(`Invalid conditions format: ${error.message}`)
}
}
private buildEvaluationContext(
ctx: ExecutionContext,
blockId: string,
sourceBlockId?: string
): Record<string, any> {
let evalContext: Record<string, any> = {
...(ctx.loopItems.get(blockId) || {}),
}
if (sourceBlockId) {
const sourceOutput = ctx.blockStates.get(sourceBlockId)?.output
if (sourceOutput && typeof sourceOutput === 'object' && sourceOutput !== null) {
evalContext = {
...evalContext,
...sourceOutput,
}
continue // Should ideally not happen if 'else' exists and has a connection
}
}
logger.info('Base eval context:', evalContext)
return evalContext
}
private async evaluateConditions(
conditions: Array<{ id: string; title: string; value: string }>,
outgoingConnections: Array<{ source: string; target: string; sourceHandle?: string }>,
evalContext: Record<string, any>,
ctx: ExecutionContext,
block: SerializedBlock
): Promise<{
selectedConnection: { target: string; sourceHandle?: string }
selectedCondition: { id: string; title: string; value: string }
}> {
for (const condition of conditions) {
if (condition.title === CONDITION.ELSE_TITLE) {
const connection = this.findConnectionForCondition(outgoingConnections, condition.id)
if (connection) {
return { selectedConnection: connection, selectedCondition: condition }
}
continue
}
// 2. Evaluate the condition using the shared evaluation function
const conditionValueString = String(condition.value || '')
try {
const conditionMet = await evaluateConditionExpression(
ctx,
conditionValueString,
context,
block,
this.resolver,
evalContext
)
logger.info(`Condition "${condition.title}" (${condition.id}) met: ${conditionMet}`)
// Find connection for this condition
const connection = outgoingConnections?.find(
(conn) => conn.sourceHandle === `condition-${condition.id}`
) as { target: string; sourceHandle?: string } | undefined
const connection = this.findConnectionForCondition(outgoingConnections, condition.id)
if (connection && conditionMet) {
selectedConnection = connection
selectedCondition = condition
break // Found the first matching condition
return { selectedConnection: connection, selectedCondition: condition }
}
} catch (error: any) {
logger.error(`Failed to evaluate condition "${condition.title}": ${error.message}`)
@@ -185,57 +198,29 @@ export class ConditionBlockHandler implements BlockHandler {
}
}
// Handle case where no condition was met (should only happen if no 'else' exists)
if (!selectedConnection || !selectedCondition) {
// Check if an 'else' block exists but wasn't selected (shouldn't happen with current logic)
const elseCondition = conditions.find((c) => c.title === 'else')
if (elseCondition) {
logger.warn(`No condition met, but an 'else' block exists. Selecting 'else' path.`, {
blockId: block.id,
})
const elseConnection = outgoingConnections?.find(
(conn) => conn.sourceHandle === `condition-${elseCondition.id}`
) as { target: string; sourceHandle?: string } | undefined
if (elseConnection) {
selectedConnection = elseConnection
selectedCondition = elseCondition
} else {
throw new Error(
`No path found for condition block "${block.metadata?.name}", and 'else' connection missing.`
)
}
} else {
throw new Error(
`No matching path found for condition block "${block.metadata?.name}", and no 'else' block exists.`
)
const elseCondition = conditions.find((c) => c.title === CONDITION.ELSE_TITLE)
if (elseCondition) {
logger.warn(`No condition met, selecting 'else' path`, { blockId: block.id })
const elseConnection = this.findConnectionForCondition(outgoingConnections, elseCondition.id)
if (elseConnection) {
return { selectedConnection: elseConnection, selectedCondition: elseCondition }
}
throw new Error(
`No path found for condition block "${block.metadata?.name}", and 'else' connection missing.`
)
}
// Find target block
const targetBlock = context.workflow?.blocks.find((b) => b.id === selectedConnection?.target)
if (!targetBlock) {
throw new Error(`Target block ${selectedConnection?.target} not found`)
}
// Log the decision
logger.info(
`Condition block ${block.id} selected path: ${selectedCondition.title} (${selectedCondition.id}) -> ${targetBlock.metadata?.name || targetBlock.id}`
throw new Error(
`No matching path found for condition block "${block.metadata?.name}", and no 'else' block exists.`
)
}
// Update context decisions - use virtual block ID if available (for parallel execution)
const decisionKey = context.currentVirtualBlockId || block.id
context.decisions.condition.set(decisionKey, selectedCondition.id)
// Return output, preserving source output structure if possible
return {
...((sourceOutput as any) || {}), // Keep original fields if they exist
conditionResult: true, // Indicate a path was successfully chosen
selectedPath: {
blockId: targetBlock.id,
blockType: targetBlock.metadata?.id || 'unknown',
blockTitle: targetBlock.metadata?.name || 'Untitled Block',
},
selectedConditionId: selectedCondition.id,
}
private findConnectionForCondition(
connections: Array<{ source: string; target: string; sourceHandle?: string }>,
conditionId: string
): { target: string; sourceHandle?: string } | undefined {
return connections.find(
(conn) => conn.sourceHandle === `${EDGE.CONDITION_PREFIX}${conditionId}`
)
}
}

View File

@@ -86,7 +86,7 @@ describe('EvaluatorBlockHandler', () => {
temperature: 0.1,
}
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetProviderFromModel).toHaveBeenCalledWith('gpt-4o')
expect(mockFetch).toHaveBeenCalledWith(
@@ -154,7 +154,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
@@ -186,7 +186,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
const fetchCallArgs = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCallArgs[1].body)
@@ -215,7 +215,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect((result as any).quality).toBe(9)
})
@@ -240,7 +240,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect((result as any).score).toBe(0)
})
@@ -268,7 +268,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect((result as any).accuracy).toBe(0)
expect((result as any).fluency).toBe(0)
})
@@ -293,7 +293,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect((result as any).camelcasescore).toBe(7)
})
@@ -321,7 +321,7 @@ describe('EvaluatorBlockHandler', () => {
})
})
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect((result as any).presentscore).toBe(4)
expect((result as any).missingscore).toBe(0)
@@ -339,6 +339,6 @@ describe('EvaluatorBlockHandler', () => {
})
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow('Server error')
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow('Server error')
})
})

View File

@@ -1,8 +1,9 @@
import { createLogger } from '@/lib/logs/console/logger'
import { getBaseUrl } from '@/lib/urls/utils'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import { BlockType, DEFAULTS, EVALUATOR, HTTP } from '@/executor/consts'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import { buildAPIUrl, extractAPIErrorMessage } from '@/executor/utils/http'
import { isJSONString, parseJSON, stringifyJSON } from '@/executor/utils/json'
import { calculateCost, getProviderFromModel } from '@/providers/utils'
import type { SerializedBlock } from '@/serializer/types'
@@ -17,40 +18,17 @@ export class EvaluatorBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<BlockOutput> {
const evaluatorConfig = {
model: inputs.model || 'gpt-4o',
model: inputs.model || EVALUATOR.DEFAULT_MODEL,
apiKey: inputs.apiKey,
}
const providerId = getProviderFromModel(evaluatorConfig.model)
// Process the content to ensure it's in a suitable format
let processedContent = ''
try {
if (typeof inputs.content === 'string') {
if (inputs.content.trim().startsWith('[') || inputs.content.trim().startsWith('{')) {
try {
const parsed = JSON.parse(inputs.content)
processedContent = JSON.stringify(parsed, null, 2)
} catch (_e) {
processedContent = inputs.content
}
} else {
processedContent = inputs.content
}
} else if (typeof inputs.content === 'object') {
processedContent = JSON.stringify(inputs.content, null, 2)
} else {
processedContent = String(inputs.content || '')
}
} catch (e) {
logger.error('Error processing content:', e)
processedContent = String(inputs.content || '')
}
const processedContent = this.processContent(inputs.content)
// Parse system prompt object with robust error handling
let systemPromptObj: { systemPrompt: string; responseFormat: any } = {
@@ -88,11 +66,10 @@ export class EvaluatorBlockHandler implements BlockHandler {
Return a JSON object with each metric name as a key and a numeric score as the value. No explanations, only scores.`,
responseFormat: {
name: 'evaluation_response',
name: EVALUATOR.RESPONSE_SCHEMA_NAME,
schema: {
type: 'object',
properties: responseProperties,
// Filter out invalid names before creating the required array
required: metrics.filter((m: any) => m?.name).map((m: any) => m.name.toLowerCase()),
additionalProperties: false,
},
@@ -107,159 +84,59 @@ export class EvaluatorBlockHandler implements BlockHandler {
}
try {
const url = new URL('/api/providers', getBaseUrl())
const url = buildAPIUrl('/api/providers')
// Make sure we force JSON output in the request
const providerRequest = {
provider: providerId,
model: evaluatorConfig.model,
systemPrompt: systemPromptObj.systemPrompt,
responseFormat: systemPromptObj.responseFormat,
context: JSON.stringify([
context: stringifyJSON([
{
role: 'user',
content:
'Please evaluate the content provided in the system prompt. Return ONLY a valid JSON with metric scores.',
},
]),
temperature: 0.1,
temperature: EVALUATOR.DEFAULT_TEMPERATURE,
apiKey: evaluatorConfig.apiKey,
workflowId: context.workflowId,
workflowId: ctx.workflowId,
}
const response = await fetch(url.toString(), {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': HTTP.CONTENT_TYPE.JSON,
},
body: JSON.stringify(providerRequest),
body: stringifyJSON(providerRequest),
})
if (!response.ok) {
// Try to extract a helpful error message
let errorMessage = `Provider API request failed with status ${response.status}`
try {
const errorData = await response.json()
if (errorData.error) {
errorMessage = errorData.error
}
} catch (_e) {
// If JSON parsing fails, use the original error message
}
const errorMessage = await extractAPIErrorMessage(response)
throw new Error(errorMessage)
}
const result = await response.json()
// Parse response content with robust error handling
let parsedContent: Record<string, any> = {}
try {
const contentStr = result.content.trim()
let jsonStr = ''
const parsedContent = this.extractJSONFromResponse(result.content)
// Method 1: Extract content between first { and last }
const fullMatch = contentStr.match(/(\{[\s\S]*\})/) // Regex to find JSON structure
if (fullMatch) {
jsonStr = fullMatch[0]
}
// Method 2: Try to find and extract just the JSON part
else if (contentStr.includes('{') && contentStr.includes('}')) {
const startIdx = contentStr.indexOf('{')
const endIdx = contentStr.lastIndexOf('}') + 1
jsonStr = contentStr.substring(startIdx, endIdx)
}
// Method 3: Just use the raw content as a last resort
else {
jsonStr = contentStr
}
const metricScores = this.extractMetricScores(parsedContent, inputs.metrics)
// Try to parse the extracted JSON
try {
parsedContent = JSON.parse(jsonStr)
} catch (parseError) {
logger.error('Failed to parse extracted JSON:', parseError)
throw new Error('Invalid JSON in response')
}
} catch (error) {
logger.error('Error parsing evaluator response:', error)
logger.error('Raw response content:', result.content)
// Fallback to empty object
parsedContent = {}
}
// Extract and process metric scores with proper validation
const metricScores: Record<string, any> = {}
try {
// Ensure metrics is an array before processing
const validMetrics = Array.isArray(inputs.metrics) ? inputs.metrics : []
// If we have a successful parse, extract the metrics
if (Object.keys(parsedContent).length > 0) {
validMetrics.forEach((metric: any) => {
// Check if metric and name are valid before proceeding
if (!metric || !metric.name) {
logger.warn('Skipping invalid metric entry during score extraction:', metric)
return // Skip this iteration
}
const metricName = metric.name
const lowerCaseMetricName = metricName.toLowerCase()
// Try multiple possible ways the metric might be represented
if (parsedContent[metricName] !== undefined) {
metricScores[lowerCaseMetricName] = Number(parsedContent[metricName])
} else if (parsedContent[metricName.toLowerCase()] !== undefined) {
metricScores[lowerCaseMetricName] = Number(parsedContent[metricName.toLowerCase()])
} else if (parsedContent[metricName.toUpperCase()] !== undefined) {
metricScores[lowerCaseMetricName] = Number(parsedContent[metricName.toUpperCase()])
} else {
// Last resort - try to find any key that might contain this metric name
const matchingKey = Object.keys(parsedContent).find((key) => {
// Add check for key validity before calling toLowerCase()
return typeof key === 'string' && key.toLowerCase().includes(lowerCaseMetricName)
})
if (matchingKey) {
metricScores[lowerCaseMetricName] = Number(parsedContent[matchingKey])
} else {
logger.warn(`Metric "${metricName}" not found in LLM response`)
metricScores[lowerCaseMetricName] = 0
}
}
})
} else {
// If we couldn't parse any content, set all metrics to 0
validMetrics.forEach((metric: any) => {
// Ensure metric and name are valid before setting default score
if (metric?.name) {
metricScores[metric.name.toLowerCase()] = 0
} else {
logger.warn('Skipping invalid metric entry when setting default scores:', metric)
}
})
}
} catch (e) {
logger.error('Error extracting metric scores:', e)
}
// Calculate cost based on token usage, similar to how providers do it
const costCalculation = calculateCost(
result.model,
result.tokens?.prompt || 0,
result.tokens?.completion || 0,
false // Evaluator blocks don't typically use cached input
result.tokens?.prompt || DEFAULTS.TOKENS.PROMPT,
result.tokens?.completion || DEFAULTS.TOKENS.COMPLETION,
false
)
// Create result with metrics as direct fields for easy access
const outputResult = {
return {
content: inputs.content,
model: result.model,
tokens: {
prompt: result.tokens?.prompt || 0,
completion: result.tokens?.completion || 0,
total: result.tokens?.total || 0,
prompt: result.tokens?.prompt || DEFAULTS.TOKENS.PROMPT,
completion: result.tokens?.completion || DEFAULTS.TOKENS.COMPLETION,
total: result.tokens?.total || DEFAULTS.TOKENS.TOTAL,
},
cost: {
input: costCalculation.input,
@@ -268,11 +145,101 @@ export class EvaluatorBlockHandler implements BlockHandler {
},
...metricScores,
}
return outputResult
} catch (error) {
logger.error('Evaluator execution failed:', error)
throw error
}
}
private processContent(content: any): string {
if (typeof content === 'string') {
if (isJSONString(content)) {
const parsed = parseJSON(content, null)
return parsed ? stringifyJSON(parsed) : content
}
return content
}
if (typeof content === 'object') {
return stringifyJSON(content)
}
return String(content || '')
}
private extractJSONFromResponse(responseContent: string): Record<string, any> {
try {
const contentStr = responseContent.trim()
const fullMatch = contentStr.match(/(\{[\s\S]*\})/)
if (fullMatch) {
return parseJSON(fullMatch[0], {})
}
if (contentStr.includes('{') && contentStr.includes('}')) {
const startIdx = contentStr.indexOf('{')
const endIdx = contentStr.lastIndexOf('}') + 1
const jsonStr = contentStr.substring(startIdx, endIdx)
return parseJSON(jsonStr, {})
}
return parseJSON(contentStr, {})
} catch (error) {
logger.error('Error parsing evaluator response:', error)
logger.error('Raw response content:', responseContent)
return {}
}
}
private extractMetricScores(
parsedContent: Record<string, any>,
metrics: any
): Record<string, number> {
const metricScores: Record<string, number> = {}
const validMetrics = Array.isArray(metrics) ? metrics : []
if (Object.keys(parsedContent).length === 0) {
validMetrics.forEach((metric: any) => {
if (metric?.name) {
metricScores[metric.name.toLowerCase()] = DEFAULTS.EXECUTION_TIME
}
})
return metricScores
}
validMetrics.forEach((metric: any) => {
if (!metric?.name) {
logger.warn('Skipping invalid metric entry:', metric)
return
}
const score = this.findMetricScore(parsedContent, metric.name)
metricScores[metric.name.toLowerCase()] = score
})
return metricScores
}
private findMetricScore(parsedContent: Record<string, any>, metricName: string): number {
const lowerMetricName = metricName.toLowerCase()
if (parsedContent[metricName] !== undefined) {
return Number(parsedContent[metricName])
}
if (parsedContent[lowerMetricName] !== undefined) {
return Number(parsedContent[lowerMetricName])
}
const matchingKey = Object.keys(parsedContent).find((key) => {
return typeof key === 'string' && key.toLowerCase() === lowerMetricName
})
if (matchingKey) {
return Number(parsedContent[matchingKey])
}
logger.warn(`Metric "${metricName}" not found in LLM response`)
return DEFAULTS.EXECUTION_TIME
}
}

View File

@@ -87,7 +87,7 @@ describe('FunctionBlockHandler', () => {
}
const expectedOutput: any = { result: 'Success' }
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
@@ -121,7 +121,7 @@ describe('FunctionBlockHandler', () => {
}
const expectedOutput: any = { result: 'Success' }
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
@@ -147,7 +147,7 @@ describe('FunctionBlockHandler', () => {
_context: { workflowId: mockContext.workflowId, workspaceId: mockContext.workspaceId },
}
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
@@ -163,7 +163,7 @@ describe('FunctionBlockHandler', () => {
const errorResult = { success: false, error: 'Function execution failed: Code failed' }
mockExecuteTool.mockResolvedValue(errorResult)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Function execution failed: Code failed'
)
expect(mockExecuteTool).toHaveBeenCalled()
@@ -174,7 +174,7 @@ describe('FunctionBlockHandler', () => {
const errorResult = { success: false }
mockExecuteTool.mockResolvedValue(errorResult)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Function execution failed'
)
})

View File

@@ -3,38 +3,12 @@ import { DEFAULT_CODE_LANGUAGE } from '@/lib/execution/languages'
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType } from '@/executor/consts'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import { collectBlockData } from '@/executor/utils/block-data'
import type { SerializedBlock } from '@/serializer/types'
import { executeTool } from '@/tools'
const logger = createLogger('FunctionBlockHandler')
/**
* Helper function to collect runtime block outputs and name mappings
* for tag resolution in function execution
*/
function collectBlockData(context: ExecutionContext): {
blockData: Record<string, any>
blockNameMapping: Record<string, string>
} {
const blockData: Record<string, any> = {}
const blockNameMapping: Record<string, string> = {}
for (const [id, state] of context.blockStates.entries()) {
if (state.output !== undefined) {
blockData[id] = state.output
const workflowBlock = context.workflow?.blocks?.find((b) => b.id === id)
if (workflowBlock?.metadata?.name) {
// Map both the display name and normalized form
blockNameMapping[workflowBlock.metadata.name] = id
const normalized = workflowBlock.metadata.name.replace(/\s+/g, '').toLowerCase()
blockNameMapping[normalized] = id
}
}
}
return { blockData, blockNameMapping }
}
/**
* Handler for Function blocks that execute custom code.
*/
@@ -44,18 +18,16 @@ export class FunctionBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<any> {
const codeContent = Array.isArray(inputs.code)
? inputs.code.map((c: { content: string }) => c.content).join('\n')
: inputs.code
// Extract block data for variable resolution
const { blockData, blockNameMapping } = collectBlockData(context)
const { blockData, blockNameMapping } = collectBlockData(ctx)
// Directly use the function_execute tool which calls the API route
const result = await executeTool(
'function_execute',
{
@@ -63,18 +35,18 @@ export class FunctionBlockHandler implements BlockHandler {
language: inputs.language || DEFAULT_CODE_LANGUAGE,
useLocalVM: !inputs.remoteExecution,
timeout: inputs.timeout || DEFAULT_EXECUTION_TIMEOUT_MS,
envVars: context.environmentVariables || {},
workflowVariables: context.workflowVariables || {},
blockData: blockData, // Pass block data for variable resolution
blockNameMapping: blockNameMapping, // Pass block name to ID mapping
envVars: ctx.environmentVariables || {},
workflowVariables: ctx.workflowVariables || {},
blockData,
blockNameMapping,
_context: {
workflowId: context.workflowId,
workspaceId: context.workspaceId,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
},
},
false, // skipProxy
false, // skipPostProcess
context // execution context for file processing
false,
false,
ctx
)
if (!result.success) {

View File

@@ -90,7 +90,7 @@ describe('GenericBlockHandler', () => {
}
const expectedOutput: any = { customResult: 'OK' }
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetTool).toHaveBeenCalledWith('some_custom_tool')
expect(mockExecuteTool).toHaveBeenCalledWith(
@@ -109,7 +109,7 @@ describe('GenericBlockHandler', () => {
// Override mock to return undefined for this test
mockGetTool.mockImplementation(() => undefined)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Tool not found: some_custom_tool'
)
expect(mockExecuteTool).not.toHaveBeenCalled()
@@ -124,13 +124,13 @@ describe('GenericBlockHandler', () => {
}
mockExecuteTool.mockResolvedValue(errorResult)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Custom tool failed'
)
// Re-execute to check error properties after catching
try {
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
} catch (e: any) {
expect(e.toolId).toBe('some_custom_tool')
expect(e.blockName).toBe('Test Generic Block')
@@ -145,7 +145,7 @@ describe('GenericBlockHandler', () => {
const errorResult = { success: false, output: {} }
mockExecuteTool.mockResolvedValue(errorResult)
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Block execution of Some Custom Tool failed with no error message'
)
})
@@ -203,7 +203,7 @@ describe('GenericBlockHandler', () => {
mockExecuteTool.mockResolvedValue(mockToolResponse)
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
// Verify cost information is restructured correctly for enhanced logging
expect(result).toEqual({
@@ -270,7 +270,7 @@ describe('GenericBlockHandler', () => {
mockExecuteTool.mockResolvedValue(mockToolResponse)
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
// Verify cost information is restructured correctly
expect(result).toEqual({
@@ -309,7 +309,7 @@ describe('GenericBlockHandler', () => {
mockExecuteTool.mockResolvedValue(mockToolResponse)
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
// Should return original output without cost transformation
expect(result).toEqual({
@@ -348,7 +348,7 @@ describe('GenericBlockHandler', () => {
mockExecuteTool.mockResolvedValue(mockToolResponse)
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
// Should return original output without cost transformation
expect(result).toEqual({

View File

@@ -7,21 +7,15 @@ import { getTool } from '@/tools/utils'
const logger = createLogger('GenericBlockHandler')
/**
* Generic handler for any block types not covered by specialized handlers.
* Acts as a fallback for custom or future block types.
*/
export class GenericBlockHandler implements BlockHandler {
canHandle(block: SerializedBlock): boolean {
// This handler can handle any block type
// It should be the last handler checked.
return true
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<any> {
logger.info(`Executing block: ${block.id} (Type: ${block.metadata?.id})`)
@@ -56,19 +50,27 @@ export class GenericBlockHandler implements BlockHandler {
}
}
logger.info(`[GenericBlockHandler] Calling executeTool for ${block.config.tool}`, {
blockId: block.id,
blockName: block.metadata?.name,
originalInputs: inputs,
finalInputs: finalInputs,
tool: block.config.tool,
})
try {
const result = await executeTool(
block.config.tool,
{
...finalInputs,
_context: {
workflowId: context.workflowId,
workspaceId: context.workspaceId,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
},
},
false, // skipProxy
false, // skipPostProcess
context // execution context for file processing
false,
false,
ctx
)
if (!result.success) {

View File

@@ -4,8 +4,6 @@ import { ConditionBlockHandler } from '@/executor/handlers/condition/condition-h
import { EvaluatorBlockHandler } from '@/executor/handlers/evaluator/evaluator-handler'
import { FunctionBlockHandler } from '@/executor/handlers/function/function-handler'
import { GenericBlockHandler } from '@/executor/handlers/generic/generic-handler'
import { LoopBlockHandler } from '@/executor/handlers/loop/loop-handler'
import { ParallelBlockHandler } from '@/executor/handlers/parallel/parallel-handler'
import { ResponseBlockHandler } from '@/executor/handlers/response/response-handler'
import { RouterBlockHandler } from '@/executor/handlers/router/router-handler'
import { TriggerBlockHandler } from '@/executor/handlers/trigger/trigger-handler'
@@ -20,8 +18,6 @@ export {
EvaluatorBlockHandler,
FunctionBlockHandler,
GenericBlockHandler,
LoopBlockHandler,
ParallelBlockHandler,
ResponseBlockHandler,
RouterBlockHandler,
TriggerBlockHandler,

View File

@@ -1,252 +0,0 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { BlockType } from '@/executor/consts'
import { LoopBlockHandler } from '@/executor/handlers/loop/loop-handler'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
describe('LoopBlockHandler', () => {
let handler: LoopBlockHandler
let mockContext: ExecutionContext
let mockBlock: SerializedBlock
const mockPathTracker = {
isInActivePath: vi.fn(),
}
beforeEach(() => {
handler = new LoopBlockHandler()
mockBlock = {
id: 'loop-1',
position: { x: 0, y: 0 },
config: { tool: BlockType.LOOP, params: {} },
inputs: {},
outputs: {},
metadata: { id: BlockType.LOOP, name: 'Test Loop' },
enabled: true,
}
mockContext = {
workflowId: 'test-workflow',
blockStates: new Map(),
blockLogs: [],
metadata: { duration: 0 },
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopIterations: new Map(),
loopItems: new Map(),
completedLoops: new Set(),
executedBlocks: new Set(),
activeExecutionPath: new Set(),
workflow: {
version: '1.0',
blocks: [mockBlock],
connections: [
{
source: 'loop-1',
target: 'inner-block',
sourceHandle: 'loop-start-source',
},
{
source: 'loop-1',
target: 'after-loop',
sourceHandle: 'loop-end-source',
},
],
loops: {
'loop-1': {
id: 'loop-1',
nodes: ['inner-block'],
iterations: 3,
loopType: 'for',
},
},
},
}
})
describe('canHandle', () => {
it('should handle loop blocks', () => {
expect(handler.canHandle(mockBlock)).toBe(true)
})
it('should not handle non-loop blocks', () => {
if (mockBlock.metadata) {
mockBlock.metadata.id = BlockType.FUNCTION
}
expect(handler.canHandle(mockBlock)).toBe(false)
})
})
describe('execute', () => {
it('should initialize loop on first execution', async () => {
const result = await handler.execute(mockBlock, {}, mockContext)
expect(mockContext.loopIterations.get('loop-1')).toBe(1)
expect(mockContext.activeExecutionPath.has('inner-block')).toBe(true)
if (typeof result === 'object' && result !== null) {
const response = result as any
expect(response.currentIteration).toBe(1)
expect(response.maxIterations).toBe(3)
expect(response.completed).toBe(false)
}
})
it('should activate loop-end-source when iterations complete', async () => {
mockContext.loopIterations.set('loop-1', 4)
const result = await handler.execute(mockBlock, {}, mockContext)
expect(mockContext.completedLoops.has('loop-1')).toBe(false)
expect(mockContext.activeExecutionPath.has('after-loop')).toBe(false)
expect(mockContext.activeExecutionPath.has('inner-block')).toBe(false)
if (typeof result === 'object' && result !== null) {
const response = result as any
expect(response.completed).toBe(false)
expect(response.message).toContain('Final iteration')
}
})
it('should handle forEach loops with array items', async () => {
mockContext.workflow!.loops['loop-1'] = {
id: 'loop-1',
nodes: ['inner-block'],
iterations: 10,
loopType: 'forEach',
forEachItems: ['item1', 'item2', 'item3'],
}
const result = await handler.execute(mockBlock, {}, mockContext)
expect(mockContext.loopItems.get('loop-1')).toBe('item1')
if (typeof result === 'object' && result !== null) {
const response = result as any
expect(response.loopType).toBe('forEach')
expect(response.maxIterations).toBe(3)
}
})
it('should handle forEach loops with object items', async () => {
mockContext.workflow!.loops['loop-1'] = {
id: 'loop-1',
nodes: ['inner-block'],
iterations: 10,
loopType: 'forEach',
forEachItems: { key1: 'value1', key2: 'value2' },
}
await handler.execute(mockBlock, {}, mockContext)
const currentItem = mockContext.loopItems.get('loop-1')
expect(Array.isArray(currentItem)).toBe(true)
expect((currentItem as any)[0]).toBe('key1')
expect((currentItem as any)[1]).toBe('value1')
})
it('should limit forEach loops by collection size, not iterations parameter', async () => {
mockContext.workflow!.loops['loop-1'] = {
id: 'loop-1',
nodes: ['inner-block'],
iterations: 10,
loopType: 'forEach',
forEachItems: ['a', 'b'],
}
let result = await handler.execute(mockBlock, {}, mockContext)
expect(mockContext.loopIterations.get('loop-1')).toBe(1)
expect(mockContext.loopItems.get('loop-1')).toBe('a')
if (typeof result === 'object' && result !== null) {
const response = result as any
expect(response.maxIterations).toBe(2)
expect(response.completed).toBe(false)
}
mockContext.loopIterations.set('loop-1', 2)
result = await handler.execute(mockBlock, {}, mockContext)
expect(mockContext.loopIterations.get('loop-1')).toBe(2)
expect(mockContext.loopItems.get('loop-1')).toBe('b')
if (typeof result === 'object' && result !== null) {
const response = result as any
expect(response.completed).toBe(false)
}
// Manually increment iteration for third execution (exceeds max)
mockContext.loopIterations.set('loop-1', 3)
// Third execution should exceed the loop limit
result = await handler.execute(mockBlock, {}, mockContext)
// The loop handler no longer marks loops as completed - that's handled by the loop manager
expect(mockContext.completedLoops.has('loop-1')).toBe(false)
})
it('should throw error for forEach loops without collection', async () => {
mockContext.workflow!.loops['loop-1'] = {
id: 'loop-1',
nodes: ['inner-block'],
iterations: 5,
loopType: 'forEach',
forEachItems: '',
}
await expect(handler.execute(mockBlock, {}, mockContext)).rejects.toThrow(
'forEach loop "loop-1" requires a collection to iterate over'
)
})
it('should throw error for forEach loops with empty collection', async () => {
mockContext.workflow!.loops['loop-1'] = {
id: 'loop-1',
nodes: ['inner-block'],
iterations: 5,
loopType: 'forEach',
forEachItems: [],
}
await expect(handler.execute(mockBlock, {}, mockContext)).rejects.toThrow(
'forEach loop "loop-1" collection is empty or invalid'
)
})
})
describe('PathTracker integration', () => {
it('should activate children when in active path', async () => {
const handlerWithPathTracker = new LoopBlockHandler(undefined, mockPathTracker as any)
mockPathTracker.isInActivePath.mockReturnValue(true)
await handlerWithPathTracker.execute(mockBlock, {}, mockContext)
expect(mockContext.activeExecutionPath.has('inner-block')).toBe(true)
expect(mockPathTracker.isInActivePath).toHaveBeenCalledWith('loop-1', mockContext)
})
it('should not activate children when not in active path', async () => {
const handlerWithPathTracker = new LoopBlockHandler(undefined, mockPathTracker as any)
mockPathTracker.isInActivePath.mockReturnValue(false)
await handlerWithPathTracker.execute(mockBlock, {}, mockContext)
expect(mockContext.activeExecutionPath.has('inner-block')).toBe(false)
expect(mockPathTracker.isInActivePath).toHaveBeenCalledWith('loop-1', mockContext)
})
it('should handle PathTracker errors gracefully', async () => {
const handlerWithPathTracker = new LoopBlockHandler(undefined, mockPathTracker as any)
mockPathTracker.isInActivePath.mockImplementation(() => {
throw new Error('PathTracker error')
})
await handlerWithPathTracker.execute(mockBlock, {}, mockContext)
expect(mockContext.activeExecutionPath.has('inner-block')).toBe(true)
})
})
})

View File

@@ -1,339 +0,0 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import { evaluateConditionExpression } from '@/executor/handlers/condition/condition-handler'
import type { PathTracker } from '@/executor/path/path'
import type { InputResolver } from '@/executor/resolver/resolver'
import { Routing } from '@/executor/routing/routing'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
// Scoped logger for this handler's diagnostics.
const logger = createLogger('LoopBlockHandler')
// Fallback iteration cap for numeric 'for' loops when none is configured.
const DEFAULT_MAX_ITERATIONS = 5
/**
* Handler for loop blocks that manage iteration control and flow.
* Loop blocks don't execute logic themselves but control the flow of blocks within them.
*/
export class LoopBlockHandler implements BlockHandler {
  constructor(
    private resolver?: InputResolver,
    private pathTracker?: PathTracker
  ) {}

  /** Only loop-typed blocks are routed to this handler. */
  canHandle(block: SerializedBlock): boolean {
    return block.metadata?.id === BlockType.LOOP
  }

  /**
   * Executes one coordination pass for a loop block.
   *
   * Depending on the loop type (`for`, `forEach`, `while`, `doWhile`) this:
   * - validates the loop configuration and (for forEach) its collection,
   * - evaluates the continue condition (while/doWhile),
   * - activates child paths for the next iteration, or the post-loop path
   *   when the loop has finished,
   * - updates per-loop iteration counters and items in the execution context.
   *
   * @param block - The serialized loop block being executed.
   * @param _inputs - Unused; loop blocks take no direct inputs.
   * @param context - Mutable execution context shared across the workflow run.
   * @returns A status object describing the loop's current iteration state.
   * @throws When the loop configuration is missing or invalid, or the
   *   while/doWhile condition cannot be evaluated.
   */
  async execute(
    block: SerializedBlock,
    _inputs: Record<string, any>,
    context: ExecutionContext
  ): Promise<BlockOutput> {
    logger.info(`Executing loop block: ${block.id}`)
    const loop = context.workflow?.loops?.[block.id]
    if (!loop) {
      logger.error(`Loop configuration not found for block ${block.id}`, {
        blockId: block.id,
        availableLoops: Object.keys(context.workflow?.loops || {}),
        workflowLoops: context.workflow?.loops,
      })
      throw new Error(`Loop configuration not found for block ${block.id}`)
    }
    // Iteration counters are 1-based and persist in the shared context.
    if (!context.loopIterations.has(block.id)) {
      context.loopIterations.set(block.id, 1)
      logger.info(`Initialized loop ${block.id} starting at iteration 1`)
    }
    const currentIteration = context.loopIterations.get(block.id) || 1
    let maxIterations: number
    let forEachItems: any[] | Record<string, any> | null = null
    let shouldContinueLoop = true
    if (loop.loopType === 'forEach') {
      if (
        !loop.forEachItems ||
        (typeof loop.forEachItems === 'string' && loop.forEachItems.trim() === '')
      ) {
        throw new Error(
          `forEach loop "${block.id}" requires a collection to iterate over. Please provide an array or object in the collection field.`
        )
      }
      forEachItems = await this.evaluateForEachItems(loop.forEachItems, context, block)
      logger.info(`Evaluated forEach items for loop ${block.id}:`, forEachItems)
      if (
        !forEachItems ||
        (Array.isArray(forEachItems) && forEachItems.length === 0) ||
        (typeof forEachItems === 'object' && Object.keys(forEachItems).length === 0)
      ) {
        throw new Error(
          `forEach loop "${block.id}" collection is empty or invalid. Please provide a non-empty array or object.`
        )
      }
      const itemsLength = Array.isArray(forEachItems)
        ? forEachItems.length
        : Object.keys(forEachItems).length
      maxIterations = itemsLength
      logger.info(
        `forEach loop ${block.id} - Items: ${itemsLength}, Max iterations: ${maxIterations}`
      )
    } else if (loop.loopType === 'while' || loop.loopType === 'doWhile') {
      // For while and doWhile loops, set loop context BEFORE evaluating condition
      // This makes variables like index, currentIteration available in the condition
      const loopContext = {
        index: currentIteration - 1, // 0-based index
        currentIteration, // 1-based iteration number
      }
      context.loopItems.set(block.id, loopContext)
      // Evaluate the condition to determine if we should continue
      if (!loop.whileCondition || loop.whileCondition.trim() === '') {
        throw new Error(
          `${loop.loopType} loop "${block.id}" requires a condition expression. Please provide a valid JavaScript expression.`
        )
      }
      // For doWhile loops, skip condition evaluation on the first iteration
      // For while loops, always evaluate the condition
      if (loop.loopType === 'doWhile' && currentIteration === 1) {
        shouldContinueLoop = true
      } else {
        // Evaluate the condition at the start of each iteration
        try {
          if (!this.resolver) {
            throw new Error('Resolver is required for while/doWhile loop condition evaluation')
          }
          shouldContinueLoop = await evaluateConditionExpression(
            loop.whileCondition,
            context,
            block,
            this.resolver
          )
        } catch (error) {
          // Narrow the unknown catch value before reading a message from it,
          // so non-Error throws still produce a useful diagnostic.
          const message = error instanceof Error ? error.message : String(error)
          throw new Error(
            `Failed to evaluate ${loop.loopType} loop condition for "${block.id}": ${message}`
          )
        }
      }
      // No max iterations for while/doWhile - rely on condition and workflow timeout
      maxIterations = Number.MAX_SAFE_INTEGER
    } else {
      maxIterations = loop.iterations || DEFAULT_MAX_ITERATIONS
      logger.info(`For loop ${block.id} - Max iterations: ${maxIterations}`)
    }
    logger.info(
      `Loop ${block.id} - Current iteration: ${currentIteration}, Max iterations: ${maxIterations}, Should continue: ${shouldContinueLoop}`
    )
    // For while and doWhile loops, check if the condition is false
    if ((loop.loopType === 'while' || loop.loopType === 'doWhile') && !shouldContinueLoop) {
      // Mark the loop as completed
      context.completedLoops.add(block.id)
      // Remove any activated loop-start paths since we're not continuing
      const loopStartConnections =
        context.workflow?.connections.filter(
          (conn) => conn.source === block.id && conn.sourceHandle === 'loop-start-source'
        ) || []
      for (const conn of loopStartConnections) {
        context.activeExecutionPath.delete(conn.target)
      }
      // Activate the loop-end connections (blocks after the loop)
      const loopEndConnections =
        context.workflow?.connections.filter(
          (conn) => conn.source === block.id && conn.sourceHandle === 'loop-end-source'
        ) || []
      for (const conn of loopEndConnections) {
        context.activeExecutionPath.add(conn.target)
      }
      return {
        loopId: block.id,
        currentIteration,
        maxIterations,
        loopType: loop.loopType,
        completed: true,
        message: `${loop.loopType === 'doWhile' ? 'Do-While' : 'While'} loop completed after ${currentIteration} iterations (condition became false)`,
      } as Record<string, any>
    }
    // Only check max iterations for for/forEach loops (while/doWhile have no limit)
    if (
      (loop.loopType === 'for' || loop.loopType === 'forEach') &&
      currentIteration > maxIterations
    ) {
      logger.info(`Loop ${block.id} has reached maximum iterations (${maxIterations})`)
      return {
        loopId: block.id,
        currentIteration: currentIteration - 1, // Report the actual last iteration number
        maxIterations,
        loopType: loop.loopType || 'for',
        completed: false, // Not completed until all blocks in this iteration execute
        message: `Final iteration ${currentIteration} of ${maxIterations}`,
      } as Record<string, any>
    }
    if (loop.loopType === 'forEach' && forEachItems) {
      // Expose the full collection and the current item to child blocks.
      context.loopItems.set(`${block.id}_items`, forEachItems)
      const arrayIndex = currentIteration - 1
      const currentItem = Array.isArray(forEachItems)
        ? forEachItems[arrayIndex]
        : Object.entries(forEachItems)[arrayIndex]
      context.loopItems.set(block.id, currentItem)
      logger.info(
        `Loop ${block.id} - Set current item for iteration ${currentIteration} (index ${arrayIndex}):`,
        currentItem
      )
    }
    // Use routing strategy to determine if this block requires active path checking
    const blockType = block.metadata?.id
    if (Routing.requiresActivePathCheck(blockType || '')) {
      let isInActivePath = true
      if (this.pathTracker) {
        try {
          isInActivePath = this.pathTracker.isInActivePath(block.id, context)
        } catch (error) {
          // Fail open: a tracker error should not block execution.
          logger.warn(`PathTracker check failed for ${blockType} block ${block.id}:`, error)
          isInActivePath = true
        }
      }
      if (isInActivePath) {
        this.activateChildNodes(block, context, currentIteration)
      } else {
        logger.info(
          `${blockType} block ${block.id} is not in active execution path, skipping child activation`
        )
      }
    } else {
      this.activateChildNodes(block, context, currentIteration)
    }
    // For while/doWhile loops, now that condition is confirmed true, reset child blocks and increment counter
    if (loop.loopType === 'while' || loop.loopType === 'doWhile') {
      // Reset all child blocks for this iteration
      for (const nodeId of loop.nodes || []) {
        context.executedBlocks.delete(nodeId)
        context.blockStates.delete(nodeId)
        context.activeExecutionPath.delete(nodeId)
        context.decisions.router.delete(nodeId)
        context.decisions.condition.delete(nodeId)
      }
      // Increment the counter for the next iteration
      context.loopIterations.set(block.id, currentIteration + 1)
    } else {
      // For for/forEach loops, keep the counter value - it will be managed by the loop manager
      context.loopIterations.set(block.id, currentIteration)
    }
    return {
      loopId: block.id,
      currentIteration,
      maxIterations,
      loopType: loop.loopType || 'for',
      completed: false,
      message: `Starting iteration ${currentIteration} of ${maxIterations}`,
    } as Record<string, any>
  }

  /**
   * Activate child nodes for loop execution by marking every target of this
   * block's `loop-start-source` connections as part of the active path.
   */
  private activateChildNodes(
    block: SerializedBlock,
    context: ExecutionContext,
    currentIteration: number
  ): void {
    const loopStartConnections =
      context.workflow?.connections.filter(
        (conn) => conn.source === block.id && conn.sourceHandle === 'loop-start-source'
      ) || []
    for (const conn of loopStartConnections) {
      context.activeExecutionPath.add(conn.target)
      logger.info(`Activated loop start path to ${conn.target} for iteration ${currentIteration}`)
    }
  }

  /**
   * Evaluates forEach items expression or value.
   *
   * Accepts a ready array/object, a JSON string, or an expression string that
   * may contain variable/block references resolved via the injected resolver.
   * Returns null when the input cannot be turned into a collection.
   */
  private async evaluateForEachItems(
    forEachItems: any,
    context: ExecutionContext,
    block: SerializedBlock
  ): Promise<any[] | Record<string, any> | null> {
    // If already an array or object, return as-is
    if (
      Array.isArray(forEachItems) ||
      (typeof forEachItems === 'object' && forEachItems !== null)
    ) {
      return forEachItems
    }
    // If it's a string expression, try to evaluate it
    if (typeof forEachItems === 'string') {
      try {
        const trimmed = forEachItems.trim()
        if (trimmed.startsWith('//') || trimmed === '') {
          return []
        }
        // Try to parse as JSON first
        if (trimmed.startsWith('[') || trimmed.startsWith('{')) {
          try {
            return JSON.parse(trimmed)
          } catch {
            // Continue to expression evaluation
          }
        }
        // If we have a resolver, use it to resolve any variable references first, then block references
        if (this.resolver) {
          const resolvedVars = this.resolver.resolveVariableReferences(forEachItems, block)
          const resolved = this.resolver.resolveBlockReferences(resolvedVars, context, block)
          // Try to parse the resolved value
          try {
            return JSON.parse(resolved)
          } catch {
            // If it's not valid JSON, try to evaluate as an expression
            // NOTE(review): this evaluates workflow-author-provided text via the
            // Function constructor; acceptable only because the expression comes
            // from the workflow definition itself, not external callers.
            try {
              const result = new Function(`return ${resolved}`)()
              if (Array.isArray(result) || (typeof result === 'object' && result !== null)) {
                return result
              }
            } catch (e) {
              logger.error(`Error evaluating forEach expression: ${resolved}`, e)
            }
          }
        }
        logger.warn(`forEach expression evaluation not fully implemented: ${forEachItems}`)
        return null
      } catch (error) {
        logger.error(`Error evaluating forEach items:`, error)
        return null
      }
    }
    return null
  }
}

View File

@@ -1,576 +0,0 @@
import { describe, expect, it, vi } from 'vitest'
import { createParallelExecutionState } from '@/executor/__test-utils__/executor-mocks'
import { BlockType } from '@/executor/consts'
import { ParallelBlockHandler } from '@/executor/handlers/parallel/parallel-handler'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedBlock, SerializedParallel } from '@/serializer/types'
describe('ParallelBlockHandler', () => {
// Minimal resolver stub: passes expressions through unchanged.
const mockResolver = {
  resolveBlockReferences: vi.fn((expr: string) => expr),
}
// Path tracker stub whose return/throw behavior is configured per test.
const mockPathTracker = {
  isInActivePath: vi.fn(),
}
// Builds a serialized parallel-typed block with the given id.
const createMockBlock = (id: string): SerializedBlock => ({
  id,
  position: { x: 0, y: 0 },
  config: { tool: '', params: {} },
  inputs: {},
  outputs: {},
  metadata: { id: BlockType.PARALLEL, name: 'Test Parallel' },
  enabled: true,
})
// Builds a fresh execution context, optionally registering one parallel config.
const createMockContext = (parallel?: SerializedParallel): ExecutionContext => ({
  workflowId: 'test-workflow',
  blockStates: new Map(),
  blockLogs: [],
  metadata: { duration: 0 },
  environmentVariables: {},
  decisions: { router: new Map(), condition: new Map() },
  loopIterations: new Map(),
  loopItems: new Map(),
  completedLoops: new Set(),
  executedBlocks: new Set(),
  activeExecutionPath: new Set(),
  workflow: {
    version: '1.0',
    blocks: [],
    connections: [],
    loops: {},
    parallels: parallel ? { [parallel.id]: parallel } : {},
  },
})
it('should handle parallel blocks', () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  // A parallel-typed block is accepted…
  const parallelBlock = createMockBlock('parallel-1')
  expect(handler.canHandle(parallelBlock)).toBe(true)
  // …while any other block type is rejected.
  const agentBlock = { ...parallelBlock, metadata: { id: BlockType.AGENT } }
  expect(handler.canHandle(agentBlock)).toBe(false)
})
it('should initialize parallel block with distribution', async () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  const block = createMockBlock('parallel-1')
  const parallel: SerializedParallel = {
    id: 'parallel-1',
    nodes: ['agent-1', 'api-1'],
    distribution: ['item1', 'item2', 'item3'],
  }
  const context = createMockContext(parallel)
  // Wire the start handle so the handler has a child to activate.
  context.workflow!.connections = [
    {
      source: 'parallel-1',
      target: 'agent-1',
      sourceHandle: 'parallel-start-source',
    },
  ]
  // First execution - initialize parallel and set up iterations
  const result = await handler.execute(block, {}, context)
  expect(result as any).toMatchObject({
    parallelId: 'parallel-1',
    parallelCount: 3,
    distributionType: 'distributed',
    started: true,
    message: 'Initialized 3 parallel executions',
  })
  // Check that items were stored
  expect(context.loopItems.get('parallel-1_items')).toEqual(['item1', 'item2', 'item3'])
  // Check that target was activated
  expect(context.activeExecutionPath.has('agent-1')).toBe(true)
  // Check parallel state
  const parallelState = context.parallelExecutions?.get('parallel-1')
  expect(parallelState).toBeDefined()
  expect(parallelState?.currentIteration).toBe(1) // Indicates activation
  expect(parallelState?.parallelCount).toBe(3)
})
it('should handle waiting state when iterations are incomplete', async () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  const block = createMockBlock('parallel-1')
  const parallel: SerializedParallel = {
    id: 'parallel-1',
    nodes: ['agent-1'],
    distribution: ['item1', 'item2'],
  }
  const context = createMockContext(parallel)
  // Pre-seed state: both iterations started, none finished yet.
  context.parallelExecutions = new Map([
    [
      'parallel-1',
      createParallelExecutionState({
        parallelCount: 2,
        distributionItems: ['item1', 'item2'],
        completedExecutions: 0,
        activeIterations: new Set([0, 1]),
        currentIteration: 1,
      }),
    ],
  ])
  context.executedBlocks.add('parallel-1')
  context.workflow!.connections = [
    {
      source: 'parallel-1',
      target: 'agent-1',
      sourceHandle: 'parallel-start-source',
    },
  ]
  // Second execution - check waiting state
  const result = await handler.execute(block, {}, context)
  expect(result as any).toMatchObject({
    parallelId: 'parallel-1',
    parallelCount: 2,
    completedExecutions: 0,
    activeIterations: 2,
    waiting: true,
    message: '0 of 2 iterations completed',
  })
})
it('should handle completion after all iterations', async () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  const block = createMockBlock('parallel-1')
  const parallel: SerializedParallel = {
    id: 'parallel-1',
    nodes: ['agent-1'],
    distribution: ['item1', 'item2'],
  }
  const context = createMockContext(parallel)
  // Pre-seed state as if both iterations already produced results.
  context.parallelExecutions = new Map([
    [
      'parallel-1',
      createParallelExecutionState({
        parallelCount: 2,
        distributionItems: ['item1', 'item2'],
        completedExecutions: 0,
        executionResults: new Map([
          ['iteration_0', { 'agent-1': { result: 'result1' } }],
          ['iteration_1', { 'agent-1': { result: 'result2' } }],
        ]),
        activeIterations: new Set(),
        currentIteration: 1,
      }),
    ],
  ])
  // Mark virtual blocks as executed
  context.executedBlocks.add('parallel-1')
  context.executedBlocks.add('agent-1_parallel_parallel-1_iteration_0')
  context.executedBlocks.add('agent-1_parallel_parallel-1_iteration_1')
  context.workflow!.connections = [
    {
      source: 'parallel-1',
      target: 'evaluator-1',
      sourceHandle: 'parallel-end-source',
    },
  ]
  // Execution after all iterations complete
  const result = await handler.execute(block, {}, context)
  expect(result as any).toMatchObject({
    parallelId: 'parallel-1',
    parallelCount: 2,
    completed: true,
    results: [{ 'agent-1': { result: 'result1' } }, { 'agent-1': { result: 'result2' } }],
    message: 'Completed all 2 executions',
  })
  // Check that parallel was marked as completed
  expect(context.completedLoops.has('parallel-1')).toBe(true)
  // Check that post-parallel path was activated
  expect(context.activeExecutionPath.has('evaluator-1')).toBe(true)
})
// Distribution given as a plain object: entries drive the parallel count.
it('should handle object distribution', async () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  const block = createMockBlock('parallel-1')
  const parallel: SerializedParallel = {
    id: 'parallel-1',
    nodes: ['agent-1'],
    distribution: { key1: 'value1', key2: 'value2' },
  }
  const context = createMockContext(parallel)
  const result = await handler.execute(block, {}, context)
  expect(result as any).toMatchObject({
    parallelId: 'parallel-1',
    parallelCount: 2,
    distributionType: 'distributed',
    started: true,
  })
  // Check that object entries were stored correctly
  expect(context.loopItems.get('parallel-1_items')).toEqual({ key1: 'value1', key2: 'value2' })
  // Check parallel state
  const parallelState = context.parallelExecutions?.get('parallel-1')
  expect(parallelState?.distributionItems).toEqual({ key1: 'value1', key2: 'value2' })
})
// Distribution given as a JSON string: the handler evaluates it to an array.
it('should handle expression evaluation', async () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  const block = createMockBlock('parallel-1')
  const parallel: SerializedParallel = {
    id: 'parallel-1',
    nodes: ['agent-1'],
    distribution: '["a", "b", "c"]',
  }
  const context = createMockContext(parallel)
  const result = await handler.execute(block, {}, context)
  expect(result as any).toMatchObject({
    parallelId: 'parallel-1',
    parallelCount: 3,
    distributionType: 'distributed',
  })
  expect(context.loopItems.get('parallel-1_items')).toEqual(['a', 'b', 'c'])
})
it('should handle parallel without distribution', async () => {
  const handler = new ParallelBlockHandler(mockResolver as any)
  const block = createMockBlock('parallel-1')
  // Strip any count so the handler falls back to a single execution.
  block.config.params = {}
  const context = createMockContext({ id: 'parallel-1', nodes: ['agent-1'] })
  const output = await handler.execute(block, {}, context)
  expect(output as any).toMatchObject({
    parallelId: 'parallel-1',
    parallelCount: 1,
    distributionType: 'count',
    started: true,
    message: 'Initialized 1 parallel execution',
  })
  // No distribution means no per-iteration items are stored.
  expect(context.loopItems.has('parallel-1_items')).toBe(false)
})
describe('multiple downstream connections', () => {
  it('should make results available to all downstream blocks', async () => {
    const handler = new ParallelBlockHandler()
    const parallelBlock = createMockBlock('parallel-1')
    parallelBlock.config.params = {
      parallelType: 'collection',
      count: 3,
    }
    const parallel: SerializedParallel = {
      id: 'parallel-1',
      nodes: ['agent-1'],
      distribution: ['item1', 'item2', 'item3'],
    }
    const context = createMockContext(parallel)
    // One start connection plus TWO end connections — both end targets
    // must see the aggregated results.
    context.workflow!.connections = [
      {
        source: 'parallel-1',
        target: 'agent-1',
        sourceHandle: 'parallel-start-source',
      },
      {
        source: 'parallel-1',
        target: 'function-1',
        sourceHandle: 'parallel-end-source',
      },
      {
        source: 'parallel-1',
        target: 'parallel-2',
        sourceHandle: 'parallel-end-source',
      },
    ]
    // Initialize parallel
    const initResult = await handler.execute(parallelBlock, {}, context)
    expect((initResult as any).started).toBe(true)
    expect((initResult as any).parallelCount).toBe(3)
    // Simulate all virtual blocks being executed
    const parallelState = context.parallelExecutions?.get('parallel-1')
    expect(parallelState).toBeDefined()
    // Mark all virtual blocks as executed and store results
    for (let i = 0; i < 3; i++) {
      const virtualBlockId = `agent-1_parallel_parallel-1_iteration_${i}`
      context.executedBlocks.add(virtualBlockId)
      // Store iteration results
      parallelState!.executionResults.set(`iteration_${i}`, {
        'agent-1': {
          response: {
            content: `Result from iteration ${i}`,
            model: 'test-model',
          },
        },
      })
    }
    // Re-execute to aggregate results
    const aggregatedResult = await handler.execute(parallelBlock, {}, context)
    // Verify results are aggregated
    expect((aggregatedResult as any).completed).toBe(true)
    expect((aggregatedResult as any).results).toHaveLength(3)
    // Verify block state is stored
    const blockState = context.blockStates.get('parallel-1')
    expect(blockState).toBeDefined()
    expect(blockState?.output.results).toHaveLength(3)
    // Verify both downstream blocks are activated
    expect(context.activeExecutionPath.has('function-1')).toBe(true)
    expect(context.activeExecutionPath.has('parallel-2')).toBe(true)
    // Verify parallel is marked as completed
    expect(context.completedLoops.has('parallel-1')).toBe(true)
    // Simulate downstream blocks trying to access results
    // This should work without errors
    const storedResults = context.blockStates.get('parallel-1')?.output.results
    expect(storedResults).toBeDefined()
    expect(storedResults).toHaveLength(3)
  })
  it('should handle reference resolution when multiple parallel blocks exist', async () => {
    const handler = new ParallelBlockHandler()
    // Create first parallel block
    const parallel1Block = createMockBlock('parallel-1')
    parallel1Block.config.params = {
      parallelType: 'collection',
      count: 2,
    }
    // Create second parallel block (even if not connected)
    const parallel2Block = createMockBlock('parallel-2')
    parallel2Block.config.params = {
      parallelType: 'collection',
      collection: '<parallel.results>', // This references the first parallel
    }
    // Set up context with both parallels
    // (built inline rather than via createMockContext so the workflow can
    // carry real blocks and two parallel configs)
    const context: ExecutionContext = {
      workflowId: 'test-workflow',
      blockStates: new Map(),
      blockLogs: [],
      metadata: { duration: 0 },
      environmentVariables: {},
      decisions: { router: new Map(), condition: new Map() },
      loopIterations: new Map(),
      loopItems: new Map(),
      completedLoops: new Set(),
      executedBlocks: new Set(),
      activeExecutionPath: new Set(),
      workflow: {
        version: '1.0',
        blocks: [
          parallel1Block,
          parallel2Block,
          {
            id: 'agent-1',
            position: { x: 0, y: 0 },
            config: { tool: BlockType.AGENT, params: {} },
            inputs: {},
            outputs: {},
            metadata: { id: BlockType.AGENT, name: 'Agent 1' },
            enabled: true,
          },
          {
            id: 'function-1',
            position: { x: 0, y: 0 },
            config: {
              tool: BlockType.FUNCTION,
              params: {
                code: 'return <parallel.results>;',
              },
            },
            inputs: {},
            outputs: {},
            metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
            enabled: true,
          },
        ],
        connections: [
          {
            source: 'parallel-1',
            target: 'agent-1',
            sourceHandle: 'parallel-start-source',
          },
          {
            source: 'parallel-1',
            target: 'function-1',
            sourceHandle: 'parallel-end-source',
          },
          {
            source: 'parallel-1',
            target: 'parallel-2',
            sourceHandle: 'parallel-end-source',
          },
        ],
        loops: {},
        parallels: {
          'parallel-1': {
            id: 'parallel-1',
            nodes: ['agent-1'],
            distribution: ['item1', 'item2'],
          },
          'parallel-2': {
            id: 'parallel-2',
            nodes: [],
            distribution: '<parallel.results>',
          },
        },
      },
    }
    // Initialize first parallel
    await handler.execute(parallel1Block, {}, context)
    // Simulate execution of agent blocks
    const parallelState = context.parallelExecutions?.get('parallel-1')
    for (let i = 0; i < 2; i++) {
      context.executedBlocks.add(`agent-1_parallel_parallel-1_iteration_${i}`)
      parallelState!.executionResults.set(`iteration_${i}`, {
        'agent-1': { content: `Result ${i}` },
      })
    }
    // Re-execute first parallel to aggregate results
    const result = await handler.execute(parallel1Block, {}, context)
    expect((result as any).completed).toBe(true)
    // Verify the block state is available
    const blockState = context.blockStates.get('parallel-1')
    expect(blockState).toBeDefined()
    expect(blockState?.output.results).toHaveLength(2)
    // Now when function block tries to resolve <parallel.results>, it should work
    // even though parallel-2 exists on the canvas
    expect(() => {
      // This simulates what the resolver would do
      const state = context.blockStates.get('parallel-1')
      if (!state) throw new Error('No state found for block parallel-1')
      const results = state.output?.results
      if (!results) throw new Error('No results found')
      return results
    }).not.toThrow()
  })
})
describe('PathTracker integration', () => {
  // Shared fixture: a two-item parallel with one child agent wired
  // to the start handle, plus a tracker-aware handler.
  const setupFixture = () => {
    const handler = new ParallelBlockHandler(mockResolver as any, mockPathTracker as any)
    const block = createMockBlock('parallel-1')
    const context = createMockContext({
      id: 'parallel-1',
      nodes: ['agent-1'],
      distribution: ['item1', 'item2'],
    })
    context.workflow!.connections = [
      {
        source: 'parallel-1',
        target: 'agent-1',
        sourceHandle: 'parallel-start-source',
      },
    ]
    return { handler, block, context }
  }
  it('should activate children when in active path', async () => {
    const { handler, block, context } = setupFixture()
    // Mock PathTracker to return true (block is in active path)
    mockPathTracker.isInActivePath.mockReturnValue(true)
    await handler.execute(block, {}, context)
    // Should activate children when in active path
    expect(context.activeExecutionPath.has('agent-1')).toBe(true)
    expect(mockPathTracker.isInActivePath).toHaveBeenCalledWith('parallel-1', context)
  })
  it('should not activate children when not in active path', async () => {
    const { handler, block, context } = setupFixture()
    // Mock PathTracker to return false (block is not in active path)
    mockPathTracker.isInActivePath.mockReturnValue(false)
    await handler.execute(block, {}, context)
    // Should not activate children when not in active path
    expect(context.activeExecutionPath.has('agent-1')).toBe(false)
    expect(mockPathTracker.isInActivePath).toHaveBeenCalledWith('parallel-1', context)
  })
  it('should handle PathTracker errors gracefully', async () => {
    const { handler, block, context } = setupFixture()
    // Mock PathTracker to throw error
    mockPathTracker.isInActivePath.mockImplementation(() => {
      throw new Error('PathTracker error')
    })
    await handler.execute(block, {}, context)
    // Should default to activating children when PathTracker fails
    expect(context.activeExecutionPath.has('agent-1')).toBe(true)
  })
})
})

View File

@@ -1,444 +0,0 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import { ParallelRoutingUtils } from '@/executor/parallels/utils'
import type { PathTracker } from '@/executor/path/path'
import type { InputResolver } from '@/executor/resolver/resolver'
import { Routing } from '@/executor/routing/routing'
import type { BlockHandler, ExecutionContext, StreamingExecution } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
// Scoped logger for this handler's diagnostics.
const logger = createLogger('ParallelBlockHandler')
/**
* Handler for parallel blocks that manage concurrent execution of blocks.
* The parallel block sets up the execution state and lets the executor
* create virtual instances for true parallel execution.
*/
export class ParallelBlockHandler implements BlockHandler {
constructor(
private resolver?: InputResolver,
private pathTracker?: PathTracker
) {}
canHandle(block: SerializedBlock): boolean {
return block.metadata?.id === BlockType.PARALLEL
}
async execute(
block: SerializedBlock,
_inputs: Record<string, any>,
context: ExecutionContext
): Promise<BlockOutput | StreamingExecution> {
logger.info(`Executing parallel block: ${block.id}`)
// Get the parallel configuration from the workflow
const parallel = context.workflow?.parallels?.[block.id]
if (!parallel) {
logger.error(`Parallel configuration not found for block ${block.id}`, {
blockId: block.id,
availableParallels: Object.keys(context.workflow?.parallels || {}),
workflowParallels: context.workflow?.parallels,
})
throw new Error(`Parallel configuration not found for block ${block.id}`)
}
// Check if we're tracking parallel executions in context
if (!context.parallelExecutions) {
context.parallelExecutions = new Map()
}
// Get or initialize the parallel state
let parallelState = context.parallelExecutions.get(block.id)
// Check if all virtual blocks have completed (even before initialization)
if (parallelState) {
const allCompleted = this.checkAllIterationsCompleted(block.id, context)
if (allCompleted && !context.completedLoops.has(block.id)) {
logger.info(`All iterations completed for parallel ${block.id}, aggregating results`)
// Mark this parallel as completed
context.completedLoops.add(block.id)
// Check if we already have aggregated results stored (from a previous completion check)
const existingBlockState = context.blockStates.get(block.id)
if (existingBlockState?.output?.results) {
logger.info(`Parallel ${block.id} already has aggregated results, returning them`)
return existingBlockState.output
}
// Aggregate results
const results = []
for (let i = 0; i < parallelState.parallelCount; i++) {
const result = parallelState.executionResults.get(`iteration_${i}`)
if (result) {
results.push(result)
}
}
// Store the aggregated results in the block state so subsequent blocks can reference them
const aggregatedOutput = {
parallelId: block.id,
parallelCount: parallelState.parallelCount,
completed: true,
results,
message: `Completed all ${parallelState.parallelCount} executions`,
} as Record<string, any>
// Store the aggregated results in context so blocks connected to parallel-end-source can access them
context.blockStates.set(block.id, {
output: aggregatedOutput,
executed: true,
executionTime: 0, // Parallel coordination doesn't have meaningful execution time
})
// Activate the parallel-end-source connection to continue workflow
const parallelEndConnections =
context.workflow?.connections.filter(
(conn) => conn.source === block.id && conn.sourceHandle === 'parallel-end-source'
) || []
for (const conn of parallelEndConnections) {
context.activeExecutionPath.add(conn.target)
logger.info(`Activated post-parallel path to ${conn.target}`)
}
// Clean up iteration data
if (context.loopItems.has(`${block.id}_items`)) {
context.loopItems.delete(`${block.id}_items`)
}
if (context.loopItems.has(block.id)) {
context.loopItems.delete(block.id)
}
if (context.loopIterations.has(block.id)) {
context.loopIterations.delete(block.id)
}
return aggregatedOutput
}
}
if (!parallelState) {
logger.info(`Initializing parallel block ${block.id}`)
// Get the parallel type from the parallel config (explicit type to avoid inference bugs)
// If no explicit parallelType is set, infer it based on whether distribution exists
let parallelType = parallel.parallelType
if (!parallelType) {
// If there's a distribution, default to 'collection', otherwise default to 'count'
parallelType = parallel.distribution ? 'collection' : 'count'
}
const countValue = parallel.count || block.config?.params?.count || 1
logger.info(`Parallel ${block.id} configuration:`, {
parallelType,
countValue,
distribution: parallel.distribution,
configSource: parallel.count ? 'workflow_subflows' : 'block.config',
})
// Evaluate distribution items if provided and type is collection
let distributionItems: any[] | Record<string, any> | null = null
if (parallelType === 'collection' && parallel.distribution) {
distributionItems = await this.evaluateDistributionItems(
parallel.distribution,
context,
block
)
logger.info(`Evaluated distribution items for parallel ${block.id}:`, distributionItems)
}
// Determine the number of parallel executions
let parallelCount = 1
if (parallelType === 'count') {
// Use the count value for count-based parallel
parallelCount = Math.min(20, Math.max(1, countValue))
logger.info(`Parallel ${block.id} will execute ${parallelCount} times based on count`)
} else if (parallelType === 'collection' && distributionItems) {
// Use distribution items length for collection-based parallel
parallelCount = Array.isArray(distributionItems)
? distributionItems.length
: Object.keys(distributionItems).length
logger.info(
`Parallel ${block.id} will execute ${parallelCount} times based on distribution items`
)
} else {
// Simple parallel - single execution
parallelCount = 1
logger.info(`Parallel ${block.id} will execute ${parallelCount} time (simple mode)`)
}
// Initialize parallel execution state
parallelState = {
parallelCount,
distributionItems,
completedExecutions: 0,
executionResults: new Map<string, any>(),
activeIterations: new Set<number>(),
currentIteration: 1, // Start at 1 to indicate activation
parallelType,
}
// Initialize parallelExecutions if it doesn't exist
if (!context.parallelExecutions) {
context.parallelExecutions = new Map()
}
context.parallelExecutions.set(block.id, parallelState)
// Store the distribution items for access by child blocks
if (distributionItems) {
context.loopItems.set(`${block.id}_items`, distributionItems)
}
// Note: For simple count-based parallels without distribution, we don't store items
// Use routing strategy to determine if this block requires active path checking
const blockType = block.metadata?.id
if (Routing.requiresActivePathCheck(blockType || '')) {
let isInActivePath = true
if (this.pathTracker) {
try {
isInActivePath = this.pathTracker.isInActivePath(block.id, context)
} catch (error) {
logger.warn(`PathTracker check failed for ${blockType} block ${block.id}:`, error)
// Default to true to maintain existing behavior if PathTracker fails
isInActivePath = true
}
}
// Only activate child nodes if this block is in the active execution path
if (isInActivePath) {
this.activateChildNodes(block, context)
} else {
logger.info(
`${blockType} block ${block.id} is not in active execution path, skipping child activation`
)
}
} else {
// Regular blocks always activate their children
this.activateChildNodes(block, context)
}
return {
parallelId: block.id,
parallelCount,
distributionType: parallelType === 'count' ? 'count' : 'distributed',
started: true,
message: `Initialized ${parallelCount} parallel execution${parallelCount > 1 ? 's' : ''}`,
} as Record<string, any>
}
// Check if all virtual blocks have completed
const allCompleted = this.checkAllIterationsCompleted(block.id, context)
if (allCompleted) {
logger.info(`All iterations completed for parallel ${block.id}`)
// This case should have been handled earlier, but as a safety check
if (!context.completedLoops.has(block.id)) {
// Mark this parallel as completed
context.completedLoops.add(block.id)
// Check if we already have aggregated results stored (from a previous completion check)
const existingBlockState = context.blockStates.get(block.id)
if (existingBlockState?.output?.results) {
logger.info(`Parallel ${block.id} already has aggregated results, returning them`)
return existingBlockState.output
}
// Aggregate results
const results = []
for (let i = 0; i < parallelState.parallelCount; i++) {
const result = parallelState.executionResults.get(`iteration_${i}`)
if (result) {
results.push(result)
}
}
// Store the aggregated results in the block state so subsequent blocks can reference them
const aggregatedOutput = {
parallelId: block.id,
parallelCount: parallelState.parallelCount,
completed: true,
results,
message: `Completed all ${parallelState.parallelCount} executions`,
} as Record<string, any>
// Store the aggregated results in context so blocks connected to parallel-end-source can access them
context.blockStates.set(block.id, {
output: aggregatedOutput,
executed: true,
executionTime: 0, // Parallel coordination doesn't have meaningful execution time
})
// Activate the parallel-end-source connection to continue workflow
const parallelEndConnections =
context.workflow?.connections.filter(
(conn) => conn.source === block.id && conn.sourceHandle === 'parallel-end-source'
) || []
for (const conn of parallelEndConnections) {
context.activeExecutionPath.add(conn.target)
logger.info(`Activated post-parallel path to ${conn.target}`)
}
// Clean up iteration data
if (context.loopItems.has(`${block.id}_items`)) {
context.loopItems.delete(`${block.id}_items`)
}
if (context.loopItems.has(block.id)) {
context.loopItems.delete(block.id)
}
if (context.loopIterations.has(block.id)) {
context.loopIterations.delete(block.id)
}
return aggregatedOutput
}
// Already completed, return the stored results
const existingBlockState = context.blockStates.get(block.id)
if (existingBlockState?.output) {
return existingBlockState.output
}
}
// Still waiting for iterations to complete
const completedCount = this.countCompletedIterations(block.id, context)
return {
parallelId: block.id,
parallelCount: parallelState.parallelCount,
completedExecutions: completedCount,
activeIterations: parallelState.parallelCount - completedCount,
waiting: true,
message: `${completedCount} of ${parallelState.parallelCount} iterations completed`,
} as Record<string, any>
}
/**
 * Marks every node wired to this block's `parallel-start-source` handle as
 * part of the active execution path. The executor is responsible for creating
 * the virtual per-iteration instances of those child nodes.
 */
private activateChildNodes(block: SerializedBlock, context: ExecutionContext): void {
  const connections = context.workflow?.connections ?? []
  for (const conn of connections) {
    // Only follow edges leaving this block through the parallel-start handle
    if (conn.source !== block.id || conn.sourceHandle !== 'parallel-start-source') continue
    context.activeExecutionPath.add(conn.target)
    logger.info(`Activated parallel path to ${conn.target}`)
  }
}
/**
 * Checks if all iterations of a parallel block have completed.
 *
 * Delegates to the shared routing utility so that iterations skipped by
 * conditional routing are still treated as satisfied.
 *
 * @param parallelId - Id of the parallel block being coordinated
 * @param context - Current execution context
 * @returns true once every required virtual block of every iteration has run
 */
private checkAllIterationsCompleted(parallelId: string, context: ExecutionContext): boolean {
  const parallel = context.workflow?.parallels?.[parallelId]
  const state = context.parallelExecutions?.get(parallelId)
  if (!parallel || !state) {
    return false
  }
  // Use the shared utility that respects conditional routing
  return ParallelRoutingUtils.areAllRequiredVirtualBlocksExecuted(
    parallel,
    state.parallelCount,
    context.executedBlocks,
    context
  )
}
/**
 * Counts completed iterations for a parallel block.
 *
 * An iteration counts as complete only when the virtual instance of every
 * node in the parallel has been executed for that iteration index.
 *
 * @param parallelId - Id of the parallel block being coordinated
 * @param context - Current execution context
 * @returns Number of fully completed iterations (0 if state is missing)
 */
private countCompletedIterations(parallelId: string, context: ExecutionContext): number {
  const parallel = context.workflow?.parallels?.[parallelId]
  const state = context.parallelExecutions?.get(parallelId)
  if (!parallel || !state) return 0
  // An iteration is done when every node's virtual block id has executed
  const iterationIsDone = (iteration: number): boolean => {
    for (const nodeId of parallel.nodes) {
      const virtualBlockId = `${nodeId}_parallel_${parallelId}_iteration_${iteration}`
      if (!context.executedBlocks.has(virtualBlockId)) return false
    }
    return true
  }
  let completed = 0
  for (let i = 0; i < state.parallelCount; i++) {
    if (iterationIsDone(i)) completed += 1
  }
  return completed
}
/**
 * Evaluates distribution items expression or value.
 *
 * Normalizes the raw `distribution` config of a collection-based parallel
 * into the items to iterate over. Resolution order for strings:
 *   1. empty / '//'-prefixed input -> empty item list
 *   2. direct JSON parse when it looks like an array/object literal
 *   3. resolver-substituted text, parsed as JSON or evaluated as a JS
 *      expression (last resort)
 * Arrays and objects pass through unchanged; anything else yields null.
 *
 * @param distribution - Raw distribution value from the parallel config
 * @param context - Current execution context (used for block-reference resolution)
 * @param block - The parallel block whose config is being evaluated
 * @returns Items as an array or keyed object, or null when uninterpretable
 */
private async evaluateDistributionItems(
  distribution: any,
  context: ExecutionContext,
  block: SerializedBlock
): Promise<any[] | Record<string, any> | null> {
  // If already an array or object, return as-is
  if (
    Array.isArray(distribution) ||
    (typeof distribution === 'object' && distribution !== null)
  ) {
    return distribution
  }
  // If it's a string expression, try to evaluate it
  if (typeof distribution === 'string') {
    try {
      const trimmed = distribution.trim()
      // Commented-out ('//') or empty input is treated as "no items"
      if (trimmed.startsWith('//') || trimmed === '') {
        return []
      }
      // Try to parse as JSON first
      if (trimmed.startsWith('[') || trimmed.startsWith('{')) {
        try {
          return JSON.parse(trimmed)
        } catch {
          // Continue to expression evaluation
        }
      }
      // If we have a resolver, use it to resolve any variable references first, then block references
      if (this.resolver) {
        const resolvedVars = this.resolver.resolveVariableReferences(distribution, block)
        const resolved = this.resolver.resolveBlockReferences(resolvedVars, context, block)
        // Try to parse the resolved value
        try {
          return JSON.parse(resolved)
        } catch {
          // If it's not valid JSON, try to evaluate as an expression
          // NOTE(review): new Function() executes workflow-authored text as JS —
          // confirm distribution strings are trusted before they reach this point
          try {
            const result = new Function(`return ${resolved}`)()
            // Only accept array/object results; primitives fall through to null
            if (Array.isArray(result) || (typeof result === 'object' && result !== null)) {
              return result
            }
          } catch (e) {
            logger.error(`Error evaluating distribution expression: ${resolved}`, e)
          }
        }
      }
      logger.warn(`Distribution expression evaluation not fully implemented: ${distribution}`)
      return null
    } catch (error) {
      logger.error(`Error evaluating distribution items:`, error)
      return null
    }
  }
  // Non-string, non-object values (numbers, booleans, undefined) are unsupported
  return null
}
}

View File

@@ -0,0 +1,46 @@
/**
* Handler Registry
*
* Central registry for all block handlers.
* Creates handlers for real user blocks (not infrastructure like sentinels).
*/
import type { BlockHandler } from '@/executor/types'
import { AgentBlockHandler } from './agent/agent-handler'
import { ApiBlockHandler } from './api/api-handler'
import { ConditionBlockHandler } from './condition/condition-handler'
import { EvaluatorBlockHandler } from './evaluator/evaluator-handler'
import { FunctionBlockHandler } from './function/function-handler'
import { GenericBlockHandler } from './generic/generic-handler'
import { ResponseBlockHandler } from './response/response-handler'
import { RouterBlockHandler } from './router/router-handler'
import { TriggerBlockHandler } from './trigger/trigger-handler'
import { VariablesBlockHandler } from './variables/variables-handler'
import { WaitBlockHandler } from './wait/wait-handler'
import { WorkflowBlockHandler } from './workflow/workflow-handler'
/**
 * Create all block handlers.
 *
 * Note: Sentinels are NOT included here - they're infrastructure handled
 * by NodeExecutionOrchestrator, not user blocks.
 *
 * @returns Handlers in resolution order; the generic fallback is always last.
 */
export function createBlockHandlers(): BlockHandler[] {
  // Core block handlers, in the order the executor should try them
  const handlers: BlockHandler[] = [
    new TriggerBlockHandler(),
    new FunctionBlockHandler(),
    new ApiBlockHandler(),
    new ConditionBlockHandler(),
    new RouterBlockHandler(),
    new ResponseBlockHandler(),
    new AgentBlockHandler(),
    new VariablesBlockHandler(),
    new WorkflowBlockHandler(),
    new WaitBlockHandler(),
    new EvaluatorBlockHandler(),
  ]
  // Generic handler is the catch-all and must remain at the end
  handlers.push(new GenericBlockHandler())
  return handlers
}

View File

@@ -1,7 +1,7 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import type { BlockHandler } from '@/executor/types'
import { BlockType, HTTP } from '@/executor/consts'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
const logger = createLogger('ResponseBlockHandler')
@@ -19,7 +19,11 @@ export class ResponseBlockHandler implements BlockHandler {
return block.metadata?.id === BlockType.RESPONSE
}
async execute(block: SerializedBlock, inputs: Record<string, any>): Promise<BlockOutput> {
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>
): Promise<BlockOutput> {
logger.info(`Executing response block: ${block.id}`)
try {
@@ -48,8 +52,8 @@ export class ResponseBlockHandler implements BlockHandler {
error: 'Response block execution failed',
message: error.message || 'Unknown error',
},
status: 500,
headers: { 'Content-Type': 'application/json' },
status: HTTP.STATUS.SERVER_ERROR,
headers: { 'Content-Type': HTTP.CONTENT_TYPE.JSON },
},
}
}
@@ -247,10 +251,10 @@ export class ResponseBlockHandler implements BlockHandler {
}
private parseStatus(status?: string): number {
if (!status) return 200
if (!status) return HTTP.STATUS.OK
const parsed = Number(status)
if (Number.isNaN(parsed) || parsed < 100 || parsed > 599) {
return 200
return HTTP.STATUS.OK
}
return parsed
}
@@ -261,7 +265,7 @@ export class ResponseBlockHandler implements BlockHandler {
cells: { Key: string; Value: string }
}[]
): Record<string, string> {
const defaultHeaders = { 'Content-Type': 'application/json' }
const defaultHeaders = { 'Content-Type': HTTP.CONTENT_TYPE.JSON }
if (!headers) return defaultHeaders
const headerObj = headers.reduce((acc: Record<string, string>, header) => {

View File

@@ -1,33 +1,21 @@
import '@/executor/__test-utils__/mock-dependencies'
import {
beforeEach,
describe,
expect,
it,
type Mock,
type Mocked,
type MockedClass,
vi,
} from 'vitest'
import { beforeEach, describe, expect, it, type Mock, vi } from 'vitest'
import { generateRouterPrompt } from '@/blocks/blocks/router'
import { BlockType } from '@/executor/consts'
import { RouterBlockHandler } from '@/executor/handlers/router/router-handler'
import { PathTracker } from '@/executor/path/path'
import type { ExecutionContext } from '@/executor/types'
import { getProviderFromModel } from '@/providers/utils'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
const mockGenerateRouterPrompt = generateRouterPrompt as Mock
const mockGetProviderFromModel = getProviderFromModel as Mock
const MockPathTracker = PathTracker as MockedClass<typeof PathTracker>
const mockFetch = global.fetch as unknown as Mock
describe('RouterBlockHandler', () => {
let handler: RouterBlockHandler
let mockBlock: SerializedBlock
let mockContext: ExecutionContext
let mockPathTracker: Mocked<PathTracker>
let mockWorkflow: Partial<SerializedWorkflow>
let mockTargetBlock1: SerializedBlock
let mockTargetBlock2: SerializedBlock
@@ -63,13 +51,12 @@ describe('RouterBlockHandler', () => {
mockWorkflow = {
blocks: [mockBlock, mockTargetBlock1, mockTargetBlock2],
connections: [
{ source: mockBlock.id, target: mockTargetBlock1.id },
{ source: mockBlock.id, target: mockTargetBlock2.id },
{ source: mockBlock.id, target: mockTargetBlock1.id, sourceHandle: 'condition-then1' },
{ source: mockBlock.id, target: mockTargetBlock2.id, sourceHandle: 'condition-else1' },
],
}
mockPathTracker = new MockPathTracker(mockWorkflow as SerializedWorkflow) as Mocked<PathTracker>
handler = new RouterBlockHandler(mockPathTracker)
handler = new RouterBlockHandler({})
mockContext = {
workflowId: 'test-workflow-id',
@@ -147,7 +134,7 @@ describe('RouterBlockHandler', () => {
},
]
const result = await handler.execute(mockBlock, inputs, mockContext)
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockGenerateRouterPrompt).toHaveBeenCalledWith(inputs.prompt, expectedTargetBlocks)
expect(mockGetProviderFromModel).toHaveBeenCalledWith('gpt-4o')
@@ -185,6 +172,7 @@ describe('RouterBlockHandler', () => {
blockType: 'target',
blockTitle: 'Option A',
},
selectedRoute: 'target-block-1',
})
})
@@ -193,7 +181,7 @@ describe('RouterBlockHandler', () => {
mockContext.workflow!.blocks = [mockBlock, mockTargetBlock2]
// Expect execute to throw because getTargetBlocks (called internally) will throw
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Target block target-block-1 not found'
)
expect(mockFetch).not.toHaveBeenCalled()
@@ -217,7 +205,7 @@ describe('RouterBlockHandler', () => {
})
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Invalid routing decision: invalid-block-id'
)
})
@@ -225,7 +213,7 @@ describe('RouterBlockHandler', () => {
it('should use default model and temperature if not provided', async () => {
const inputs = { prompt: 'Choose.' }
await handler.execute(mockBlock, inputs, mockContext)
await handler.execute(mockContext, mockBlock, inputs)
expect(mockGetProviderFromModel).toHaveBeenCalledWith('gpt-4o')
@@ -249,6 +237,6 @@ describe('RouterBlockHandler', () => {
})
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow('Server error')
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow('Server error')
})
})

View File

@@ -2,8 +2,7 @@ import { createLogger } from '@/lib/logs/console/logger'
import { getBaseUrl } from '@/lib/urls/utils'
import { generateRouterPrompt } from '@/blocks/blocks/router'
import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/consts'
import type { PathTracker } from '@/executor/path/path'
import { BlockType, DEFAULTS, HTTP, isAgentBlockType, ROUTER } from '@/executor/consts'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import { calculateCost, getProviderFromModel } from '@/providers/utils'
import type { SerializedBlock } from '@/serializer/types'
@@ -14,25 +13,22 @@ const logger = createLogger('RouterBlockHandler')
* Handler for Router blocks that dynamically select execution paths.
*/
export class RouterBlockHandler implements BlockHandler {
/**
* @param pathTracker - Utility for tracking execution paths
*/
constructor(private pathTracker: PathTracker) {}
constructor(private pathTracker?: any) {}
canHandle(block: SerializedBlock): boolean {
return block.metadata?.id === BlockType.ROUTER
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<BlockOutput> {
const targetBlocks = this.getTargetBlocks(block, context)
const targetBlocks = this.getTargetBlocks(ctx, block)
const routerConfig = {
prompt: inputs.prompt,
model: inputs.model || 'gpt-4o',
model: inputs.model || ROUTER.DEFAULT_MODEL,
apiKey: inputs.apiKey,
}
@@ -41,7 +37,6 @@ export class RouterBlockHandler implements BlockHandler {
try {
const url = new URL('/api/providers', getBaseUrl())
// Create the provider request with proper message formatting
const messages = [{ role: 'user', content: routerConfig.prompt }]
const systemPrompt = generateRouterPrompt(routerConfig.prompt, targetBlocks)
const providerRequest = {
@@ -49,15 +44,15 @@ export class RouterBlockHandler implements BlockHandler {
model: routerConfig.model,
systemPrompt: systemPrompt,
context: JSON.stringify(messages),
temperature: 0.1,
temperature: ROUTER.INFERENCE_TEMPERATURE,
apiKey: routerConfig.apiKey,
workflowId: context.workflowId,
workflowId: ctx.workflowId,
}
const response = await fetch(url.toString(), {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Type': HTTP.CONTENT_TYPE.JSON,
},
body: JSON.stringify(providerRequest),
})
@@ -89,23 +84,27 @@ export class RouterBlockHandler implements BlockHandler {
throw new Error(`Invalid routing decision: ${chosenBlockId}`)
}
const tokens = result.tokens || { prompt: 0, completion: 0, total: 0 }
const tokens = result.tokens || {
prompt: DEFAULTS.TOKENS.PROMPT,
completion: DEFAULTS.TOKENS.COMPLETION,
total: DEFAULTS.TOKENS.TOTAL,
}
// Calculate cost based on token usage, similar to how providers do it
const cost = calculateCost(
result.model,
tokens.prompt || 0,
tokens.completion || 0,
false // Router blocks don't typically use cached input
tokens.prompt || DEFAULTS.TOKENS.PROMPT,
tokens.completion || DEFAULTS.TOKENS.COMPLETION,
false
)
return {
prompt: inputs.prompt,
model: result.model,
tokens: {
prompt: tokens.prompt || 0,
completion: tokens.completion || 0,
total: tokens.total || 0,
prompt: tokens.prompt || DEFAULTS.TOKENS.PROMPT,
completion: tokens.completion || DEFAULTS.TOKENS.COMPLETION,
total: tokens.total || DEFAULTS.TOKENS.TOTAL,
},
cost: {
input: cost.input,
@@ -114,41 +113,31 @@ export class RouterBlockHandler implements BlockHandler {
},
selectedPath: {
blockId: chosenBlock.id,
blockType: chosenBlock.type || 'unknown',
blockTitle: chosenBlock.title || 'Untitled Block',
blockType: chosenBlock.type || DEFAULTS.BLOCK_TYPE,
blockTitle: chosenBlock.title || DEFAULTS.BLOCK_TITLE,
},
}
selectedRoute: String(chosenBlock.id), // Used by ExecutionEngine to activate the correct edge
} as BlockOutput
} catch (error) {
logger.error('Router execution failed:', error)
throw error
}
}
/**
* Gets all potential target blocks for this router.
*
* @param block - Router block
* @param context - Current execution context
* @returns Array of potential target blocks with metadata
* @throws Error if target block not found
*/
private getTargetBlocks(block: SerializedBlock, context: ExecutionContext) {
return context.workflow?.connections
private getTargetBlocks(ctx: ExecutionContext, block: SerializedBlock) {
return ctx.workflow?.connections
.filter((conn) => conn.source === block.id)
.map((conn) => {
const targetBlock = context.workflow?.blocks.find((b) => b.id === conn.target)
const targetBlock = ctx.workflow?.blocks.find((b) => b.id === conn.target)
if (!targetBlock) {
throw new Error(`Target block ${conn.target} not found`)
}
// Extract system prompt for agent blocks
let systemPrompt = ''
if (targetBlock.metadata?.id === BlockType.AGENT) {
// Try to get system prompt from different possible locations
if (isAgentBlockType(targetBlock.metadata?.id)) {
systemPrompt =
targetBlock.config?.params?.systemPrompt || targetBlock.inputs?.systemPrompt || ''
// If system prompt is still not found, check if we can extract it from inputs
if (!systemPrompt && targetBlock.inputs) {
systemPrompt = targetBlock.inputs.systemPrompt || ''
}
@@ -163,7 +152,7 @@ export class RouterBlockHandler implements BlockHandler {
...targetBlock.config.params,
systemPrompt: systemPrompt,
},
currentState: context.blockStates.get(targetBlock.id)?.output,
currentState: ctx.blockStates.get(targetBlock.id)?.output,
}
})
}

View File

@@ -121,7 +121,7 @@ describe('TriggerBlockHandler', () => {
timestamp: '2023-01-01T12:00:00Z',
}
const result = await handler.execute(triggerBlock, triggerInputs, mockContext)
const result = await handler.execute(mockContext, triggerBlock, triggerInputs)
expect(result).toEqual(triggerInputs)
})
@@ -137,7 +137,7 @@ describe('TriggerBlockHandler', () => {
enabled: true,
}
const result = await handler.execute(triggerBlock, {}, mockContext)
const result = await handler.execute(mockContext, triggerBlock, {})
expect(result).toEqual({})
})
@@ -154,22 +154,15 @@ describe('TriggerBlockHandler', () => {
}
const webhookInputs = {
payload: {
event: 'user.created',
webhook: {
data: {
user: {
id: 'user123',
email: 'user@example.com',
},
provider: 'github',
payload: { event: 'push', repo: 'test-repo' },
},
},
headers: {
'content-type': 'application/json',
},
method: 'POST',
}
const result = await handler.execute(webhookBlock, webhookInputs, mockContext)
const result = await handler.execute(mockContext, webhookBlock, webhookInputs)
expect(result).toEqual(webhookInputs)
})
@@ -195,7 +188,7 @@ describe('TriggerBlockHandler', () => {
timestamp: '2023-01-01T14:30:00Z',
}
const result = await handler.execute(outlookBlock, outlookInputs, mockContext)
const result = await handler.execute(mockContext, outlookBlock, outlookInputs)
expect(result).toEqual(outlookInputs)
})
@@ -211,7 +204,7 @@ describe('TriggerBlockHandler', () => {
enabled: true,
}
const result = await handler.execute(scheduleBlock, {}, mockContext)
const result = await handler.execute(mockContext, scheduleBlock, {})
// Schedule triggers typically don't have input data, just trigger the workflow
expect(result).toEqual({})
@@ -248,7 +241,7 @@ describe('TriggerBlockHandler', () => {
timestamp: '2023-01-01T15:45:00Z',
}
const result = await handler.execute(triggerBlock, complexInputs, mockContext)
const result = await handler.execute(mockContext, triggerBlock, complexInputs)
expect(result).toEqual(complexInputs)
})

View File

@@ -21,29 +21,23 @@ export class TriggerBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<any> {
logger.info(`Executing trigger block: ${block.id} (Type: ${block.metadata?.id})`)
// If this trigger block was initialized with a precomputed output in the execution context
// (e.g., webhook payload injected at init), return it as-is to preserve the raw shape.
const existingState = context.blockStates.get(block.id)
const existingState = ctx.blockStates.get(block.id)
if (existingState?.output && Object.keys(existingState.output).length > 0) {
const existingOutput = existingState.output as any
const existingProvider = existingOutput?.webhook?.data?.provider
// Provider-specific output shaping should be handled upstream per trigger's webhook formatter
return existingOutput
}
// For trigger blocks, return the starter block's output which contains the workflow input
// This ensures webhook data like message, sender, chat, etc. are accessible
const starterBlock = context.workflow?.blocks?.find((b) => b.metadata?.id === 'starter')
const starterBlock = ctx.workflow?.blocks?.find((b) => b.metadata?.id === 'starter')
if (starterBlock) {
const starterState = context.blockStates.get(starterBlock.id)
const starterState = ctx.blockStates.get(starterBlock.id)
if (starterState?.output && Object.keys(starterState.output).length > 0) {
const starterOutput = starterState.output
@@ -59,7 +53,6 @@ export class TriggerBlockHandler implements BlockHandler {
blockType: block.metadata?.id,
})
// Provider-specific early return for GitHub: expose raw payload at root
if (provider === 'github') {
const payloadSource = webhookData.payload || {}
return {
@@ -70,98 +63,72 @@ export class TriggerBlockHandler implements BlockHandler {
if (provider === 'microsoftteams') {
const providerData = (starterOutput as any)[provider] || webhookData[provider] || {}
// Expose the raw Teams message payload at the root for easy indexing
const payloadSource = providerData?.message?.raw || webhookData.payload || {}
return {
...payloadSource,
// Keep nested copy for backwards compatibility with existing workflows
[provider]: providerData,
webhook: starterOutput.webhook,
}
}
// Provider-specific early return for Airtable: preserve raw shape entirely
if (provider === 'airtable') {
return starterOutput
}
// Extract the flattened properties that should be at root level (non-GitHub/Airtable)
const result: any = {
// Always keep the input at root level
input: starterOutput.input,
}
// FIRST: Copy all existing top-level properties (like 'event', 'message', etc.)
// This ensures that properties already flattened in webhook utils are preserved
for (const [key, value] of Object.entries(starterOutput)) {
if (key !== 'webhook' && key !== provider) {
result[key] = value
}
}
// SECOND: Generic extraction logic based on common webhook patterns
// Pattern 1: Provider-specific nested object (telegram, microsoftteams, etc.)
if (provider && starterOutput[provider]) {
// Copy all properties from provider object to root level for direct access
const providerData = starterOutput[provider]
for (const [key, value] of Object.entries(providerData)) {
// For other providers, keep existing logic (only copy objects)
if (typeof value === 'object' && value !== null) {
// Don't overwrite existing top-level properties
if (!result[key]) {
result[key] = value
}
}
}
// Keep nested structure for backwards compatibility
result[provider] = providerData
}
// Pattern 2: Provider data directly in webhook.data (based on actual structure)
else if (provider && webhookData[provider]) {
} else if (provider && webhookData[provider]) {
const providerData = webhookData[provider]
// Extract all provider properties to root level
for (const [key, value] of Object.entries(providerData)) {
if (typeof value === 'object' && value !== null) {
// Don't overwrite existing top-level properties
if (!result[key]) {
result[key] = value
}
}
}
// Keep nested structure for backwards compatibility
result[provider] = providerData
}
// Pattern 3: Email providers with data in webhook.data.payload.email (Gmail, Outlook)
else if (
} else if (
provider &&
(provider === 'gmail' || provider === 'outlook') &&
webhookData.payload?.email
) {
const emailData = webhookData.payload.email
// Flatten email fields to root level for direct access
for (const [key, value] of Object.entries(emailData)) {
if (!result[key]) {
result[key] = value
}
}
// Keep the email object for backwards compatibility
result.email = emailData
// Also keep timestamp if present in payload
if (webhookData.payload.timestamp) {
result.timestamp = webhookData.payload.timestamp
}
}
// Always keep webhook metadata
if (starterOutput.webhook) result.webhook = starterOutput.webhook
return result
@@ -174,7 +141,6 @@ export class TriggerBlockHandler implements BlockHandler {
}
}
// Fallback to resolved inputs if no starter block output
if (inputs && Object.keys(inputs).length > 0) {
logger.debug(`Returning trigger inputs for block ${block.id}`, {
inputKeys: Object.keys(inputs),
@@ -182,7 +148,6 @@ export class TriggerBlockHandler implements BlockHandler {
return inputs
}
// Fallback - return empty object for trigger blocks with no inputs
logger.debug(`No inputs provided for trigger block ${block.id}, returning empty object`)
return {}
}

View File

@@ -18,9 +18,9 @@ export class VariablesBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<BlockOutput> {
logger.info(`Executing variables block: ${block.id}`, {
blockName: block.metadata?.name,
@@ -29,22 +29,22 @@ export class VariablesBlockHandler implements BlockHandler {
})
try {
if (!context.workflowVariables) {
context.workflowVariables = {}
if (!ctx.workflowVariables) {
ctx.workflowVariables = {}
}
const assignments = this.parseAssignments(inputs.variables)
for (const assignment of assignments) {
const existingEntry = assignment.variableId
? [assignment.variableId, context.workflowVariables[assignment.variableId]]
: Object.entries(context.workflowVariables).find(
? [assignment.variableId, ctx.workflowVariables[assignment.variableId]]
: Object.entries(ctx.workflowVariables).find(
([_, v]) => v.name === assignment.variableName
)
if (existingEntry?.[1]) {
const [id, variable] = existingEntry
context.workflowVariables[id] = {
ctx.workflowVariables[id] = {
...variable,
value: assignment.value,
}
@@ -55,8 +55,8 @@ export class VariablesBlockHandler implements BlockHandler {
logger.info('Variables updated', {
updatedVariables: assignments.map((a) => a.variableName),
allVariables: Object.values(context.workflowVariables).map((v: any) => v.name),
updatedValues: Object.entries(context.workflowVariables).map(([id, v]: [string, any]) => ({
allVariables: Object.values(ctx.workflowVariables).map((v: any) => v.name),
updatedValues: Object.entries(ctx.workflowVariables).map(([id, v]: [string, any]) => ({
id,
name: v.name,
value: v.value,

View File

@@ -47,9 +47,9 @@ export class WaitBlockHandler implements BlockHandler {
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<any> {
logger.info(`Executing Wait block: ${block.id}`, { inputs })
@@ -80,8 +80,7 @@ export class WaitBlockHandler implements BlockHandler {
// Actually sleep for the specified duration
// The executor updates context.isCancelled when cancel() is called
const checkCancelled = () => {
// Check if execution was marked as cancelled in the context
return (context as any).isCancelled === true
return (ctx as any).isCancelled === true
}
const completed = await sleep(waitMs, checkCancelled)

View File

@@ -103,7 +103,7 @@ describe('WorkflowBlockHandler', () => {
it('should throw error when no workflowId is provided', async () => {
const inputs = {}
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'No workflow selected for execution'
)
})
@@ -118,7 +118,7 @@ describe('WorkflowBlockHandler', () => {
'level1_sub_level2_sub_level3_sub_level4_sub_level5_sub_level6_sub_level7_sub_level8_sub_level9_sub_level10_sub_level11',
}
await expect(handler.execute(mockBlock, inputs, deepContext)).rejects.toThrow(
await expect(handler.execute(deepContext, mockBlock, inputs)).rejects.toThrow(
'Error in child workflow "child-workflow-id": Maximum workflow nesting depth of 10 exceeded'
)
})
@@ -132,7 +132,7 @@ describe('WorkflowBlockHandler', () => {
statusText: 'Not Found',
})
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Error in child workflow "non-existent-workflow": Child workflow non-existent-workflow not found'
)
})
@@ -142,7 +142,7 @@ describe('WorkflowBlockHandler', () => {
mockFetch.mockRejectedValueOnce(new Error('Network error'))
await expect(handler.execute(mockBlock, inputs, mockContext)).rejects.toThrow(
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
'Error in child workflow "child-workflow-id": Network error'
)
})

View File

@@ -1,17 +1,17 @@
import { generateInternalToken } from '@/lib/auth/internal'
import { createLogger } from '@/lib/logs/console/logger'
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import type { TraceSpan } from '@/lib/logs/types'
import { getBaseUrl } from '@/lib/urls/utils'
import type { BlockOutput } from '@/blocks/types'
import { Executor } from '@/executor'
import { BlockType } from '@/executor/consts'
import { BlockType, DEFAULTS, HTTP } from '@/executor/consts'
import type {
BlockHandler,
ExecutionContext,
ExecutionResult,
StreamingExecution,
} from '@/executor/types'
import { buildAPIUrl, buildAuthHeaders } from '@/executor/utils/http'
import { parseJSON } from '@/executor/utils/json'
import { Serializer } from '@/serializer'
import type { SerializedBlock } from '@/serializer/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
@@ -24,9 +24,6 @@ type WorkflowTraceSpan = TraceSpan & {
output?: (Record<string, unknown> & { childTraceSpans?: WorkflowTraceSpan[] }) | null
}
// Maximum allowed depth for nested workflow executions
const MAX_WORKFLOW_DEPTH = 10
/**
* Handler for workflow blocks that execute other workflows inline.
* Creates sub-execution contexts and manages data flow between parent and child workflows.
@@ -34,26 +31,15 @@ const MAX_WORKFLOW_DEPTH = 10
export class WorkflowBlockHandler implements BlockHandler {
private serializer = new Serializer()
// Tolerant JSON parser for mapping values
// Keeps handler self-contained without introducing utilities
private safeParse(input: unknown): unknown {
if (typeof input !== 'string') return input
try {
return JSON.parse(input)
} catch {
return input
}
}
canHandle(block: SerializedBlock): boolean {
const id = block.metadata?.id
return id === BlockType.WORKFLOW || id === 'workflow_input'
return id === BlockType.WORKFLOW || id === BlockType.WORKFLOW_INPUT
}
async execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<BlockOutput | StreamingExecution> {
logger.info(`Executing workflow block: ${block.id}`)
@@ -64,14 +50,12 @@ export class WorkflowBlockHandler implements BlockHandler {
}
try {
// Check execution depth
const currentDepth = (context.workflowId?.split('_sub_').length || 1) - 1
if (currentDepth >= MAX_WORKFLOW_DEPTH) {
throw new Error(`Maximum workflow nesting depth of ${MAX_WORKFLOW_DEPTH} exceeded`)
const currentDepth = (ctx.workflowId?.split('_sub_').length || 1) - 1
if (currentDepth >= DEFAULTS.MAX_WORKFLOW_DEPTH) {
throw new Error(`Maximum workflow nesting depth of ${DEFAULTS.MAX_WORKFLOW_DEPTH} exceeded`)
}
// In deployed contexts, enforce that child workflow has an active deployment
if (context.isDeployedContext) {
if (ctx.isDeployedContext) {
const hasActiveDeployment = await this.checkChildDeployment(workflowId)
if (!hasActiveDeployment) {
throw new Error(
@@ -80,8 +64,7 @@ export class WorkflowBlockHandler implements BlockHandler {
}
}
// Load the child workflow
const childWorkflow = context.isDeployedContext
const childWorkflow = ctx.isDeployedContext
? await this.loadChildWorkflowDeployed(workflowId)
: await this.loadChildWorkflow(workflowId)
@@ -98,14 +81,10 @@ export class WorkflowBlockHandler implements BlockHandler {
`Executing child workflow: ${childWorkflowName} (${workflowId}) at depth ${currentDepth}`
)
// Prepare the input for the child workflow
// Prefer structured mapping if provided; otherwise fall back to legacy 'input' passthrough
let childWorkflowInput: Record<string, any> = {}
if (inputs.inputMapping !== undefined && inputs.inputMapping !== null) {
// Handle inputMapping - could be object or stringified JSON
const raw = inputs.inputMapping
const normalized = this.safeParse(raw)
const normalized = parseJSON(inputs.inputMapping, inputs.inputMapping)
if (normalized && typeof normalized === 'object' && !Array.isArray(normalized)) {
childWorkflowInput = normalized as Record<string, any>
@@ -113,40 +92,28 @@ export class WorkflowBlockHandler implements BlockHandler {
childWorkflowInput = {}
}
} else if (inputs.input !== undefined) {
// Legacy behavior: pass under start.input
childWorkflowInput = inputs.input
}
// Remove the workflowId from the input to avoid confusion
const { workflowId: _, input: __, ...otherInputs } = inputs
// Execute child workflow inline
const subExecutor = new Executor({
workflow: childWorkflow.serializedState,
workflowInput: childWorkflowInput,
envVarValues: context.environmentVariables,
envVarValues: ctx.environmentVariables,
workflowVariables: childWorkflow.variables || {},
contextExtensions: {
isChildExecution: true, // Prevent child executor from managing global state
// Propagate deployed context down to child execution so nested children obey constraints
isDeployedContext: context.isDeployedContext === true,
isChildExecution: true,
isDeployedContext: ctx.isDeployedContext === true,
},
})
const startTime = performance.now()
// Use the actual child workflow ID for authentication, not the execution ID
// This ensures knowledge base and other API calls can properly authenticate
const result = await subExecutor.execute(workflowId)
const executionResult = this.toExecutionResult(result)
const duration = performance.now() - startTime
logger.info(`Child workflow ${childWorkflowName} completed in ${Math.round(duration)}ms`)
const childTraceSpans = this.captureChildWorkflowLogs(
executionResult,
childWorkflowName,
context
)
const childTraceSpans = this.captureChildWorkflowLogs(executionResult, childWorkflowName, ctx)
const mappedResult = this.mapChildOutputToParent(
executionResult,
@@ -161,7 +128,6 @@ export class WorkflowBlockHandler implements BlockHandler {
const errorWithSpans = new Error(
`Error in child workflow "${childWorkflowName}": ${childError}`
) as any
// Attach trace spans and name for higher-level logging to consume
errorWithSpans.childTraceSpans = childTraceSpans
errorWithSpans.childWorkflowName = childWorkflowName
errorWithSpans.executionResult = executionResult
@@ -178,7 +144,7 @@ export class WorkflowBlockHandler implements BlockHandler {
const originalError = error.message || 'Unknown error'
if (originalError.startsWith('Error in child workflow')) {
throw error // Re-throw as-is to avoid duplication
throw error
}
const wrappedError = new Error(
@@ -197,24 +163,14 @@ export class WorkflowBlockHandler implements BlockHandler {
}
}
/**
* Loads a child workflow from the API
*/
private async loadChildWorkflow(workflowId: string) {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (typeof window === 'undefined') {
const token = await generateInternalToken()
headers.Authorization = `Bearer ${token}`
}
const headers = await buildAuthHeaders()
const url = buildAPIUrl(`/api/workflows/${workflowId}`)
const response = await fetch(`${getBaseUrl()}/api/workflows/${workflowId}`, {
headers,
})
const response = await fetch(url.toString(), { headers })
if (!response.ok) {
if (response.status === 404) {
if (response.status === HTTP.STATUS.NOT_FOUND) {
logger.warn(`Child workflow ${workflowId} not found`)
return null
}
@@ -233,13 +189,13 @@ export class WorkflowBlockHandler implements BlockHandler {
if (!workflowState || !workflowState.blocks) {
throw new Error(`Child workflow ${workflowId} has invalid state`)
}
// Important: do not swallow serialization/validation errors
const serializedWorkflow = this.serializer.serializeWorkflow(
workflowState.blocks,
workflowState.edges || [],
workflowState.loops || {},
workflowState.parallels || {},
true // Enable validation during execution
true
)
const workflowVariables = (workflowData.variables as Record<string, any>) || {}
@@ -259,25 +215,19 @@ export class WorkflowBlockHandler implements BlockHandler {
}
}
/**
* Checks if a workflow has an active deployed version
*/
private async checkChildDeployment(workflowId: string): Promise<boolean> {
try {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (typeof window === 'undefined') {
const token = await generateInternalToken()
headers.Authorization = `Bearer ${token}`
}
const response = await fetch(`${getBaseUrl()}/api/workflows/${workflowId}/deployed`, {
const headers = await buildAuthHeaders()
const url = buildAPIUrl(`/api/workflows/${workflowId}/deployed`)
const response = await fetch(url.toString(), {
headers,
cache: 'no-store',
})
if (!response.ok) return false
const json = await response.json()
// API returns { deployedState: state | null }
return !!json?.data?.deployedState || !!json?.deployedState
} catch (e) {
logger.error(`Failed to check child deployment for ${workflowId}:`, e)
@@ -285,25 +235,17 @@ export class WorkflowBlockHandler implements BlockHandler {
}
}
/**
* Loads child workflow using deployed state (for API/webhook/schedule/chat executions)
*/
private async loadChildWorkflowDeployed(workflowId: string) {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
}
if (typeof window === 'undefined') {
const token = await generateInternalToken()
headers.Authorization = `Bearer ${token}`
}
const headers = await buildAuthHeaders()
const deployedUrl = buildAPIUrl(`/api/workflows/${workflowId}/deployed`)
// Fetch deployed state
const deployedRes = await fetch(`${getBaseUrl()}/api/workflows/${workflowId}/deployed`, {
const deployedRes = await fetch(deployedUrl.toString(), {
headers,
cache: 'no-store',
})
if (!deployedRes.ok) {
if (deployedRes.status === 404) {
if (deployedRes.status === HTTP.STATUS.NOT_FOUND) {
return null
}
throw new Error(
@@ -316,18 +258,18 @@ export class WorkflowBlockHandler implements BlockHandler {
throw new Error(`Deployed state missing or invalid for child workflow ${workflowId}`)
}
// Fetch variables and name from live metadata (variables are not stored in deployments)
const metaRes = await fetch(`${getBaseUrl()}/api/workflows/${workflowId}`, {
const metaUrl = buildAPIUrl(`/api/workflows/${workflowId}`)
const metaRes = await fetch(metaUrl.toString(), {
headers,
cache: 'no-store',
})
if (!metaRes.ok) {
throw new Error(`Failed to fetch workflow metadata: ${metaRes.status} ${metaRes.statusText}`)
}
const metaJson = await metaRes.json()
const wfData = metaJson?.data
// Important: do not swallow serialization/validation errors
const serializedWorkflow = this.serializer.serializeWorkflow(
deployedState.blocks,
deployedState.edges || [],
@@ -339,7 +281,7 @@ export class WorkflowBlockHandler implements BlockHandler {
const workflowVariables = (wfData?.variables as Record<string, any>) || {}
return {
name: wfData?.name || 'Workflow',
name: wfData?.name || DEFAULTS.WORKFLOW_NAME,
serializedState: serializedWorkflow,
variables: workflowVariables,
}
@@ -381,9 +323,6 @@ export class WorkflowBlockHandler implements BlockHandler {
}
}
/**
* Transforms trace span for child workflow context
*/
private transformSpanForChildWorkflow(
span: WorkflowTraceSpan,
childWorkflowName: string
@@ -480,9 +419,6 @@ export class WorkflowBlockHandler implements BlockHandler {
return !span.blockId
}
/**
* Maps child workflow output to parent block output
*/
private mapChildOutputToParent(
childResult: ExecutionResult,
childWorkflowId: string,
@@ -498,15 +434,12 @@ export class WorkflowBlockHandler implements BlockHandler {
childWorkflowName,
error: childResult.error || 'Child workflow execution failed',
}
// Only include spans when present to keep output stable for callers/tests
if (Array.isArray(childTraceSpans) && childTraceSpans.length > 0) {
failure.childTraceSpans = childTraceSpans
}
return failure as Record<string, any>
}
// childResult is an ExecutionResult with structure { success, output, metadata, logs }
// We want the actual output from the execution
const result = childResult.output || {}
return {

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,688 +0,0 @@
import { beforeEach, describe, expect, test, vi } from 'vitest'
import { createMockContext } from '@/executor/__test-utils__/executor-mocks'
import { BlockType } from '@/executor/consts'
import { LoopManager } from '@/executor/loops/loops'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedLoop, SerializedWorkflow } from '@/serializer/types'
// Stub the console logger module so tests run silently; each log level
// is replaced with a spy so accidental logging never hits stdout.
vi.mock('@/lib/logs/console/logger', () => ({
  createLogger: () => ({
    info: vi.fn(),
    error: vi.fn(),
    warn: vi.fn(),
    debug: vi.fn(),
  }),
}))
/**
 * Unit tests for LoopManager.
 *
 * Covers:
 *  - construction and per-loop iteration counts (with defaults),
 *  - processLoopIterations: iteration reset vs. loop completion for
 *    'for' and 'forEach' loops, including router/condition/error paths,
 *  - storeIterationResult: per-iteration result aggregation,
 *  - accessors: getLoopIndex / getIterations / getCurrentItem,
 *  - edge cases (empty nodes, missing workflow/config, bad forEach items)
 *    and multi-loop / nested-loop scenarios.
 */
describe('LoopManager', () => {
  let manager: LoopManager
  let mockContext: ExecutionContext

  // Factory: a basic 'for' loop over block-1 and block-2 (3 iterations by default).
  const createBasicLoop = (overrides?: Partial<SerializedLoop>): SerializedLoop => ({
    id: 'loop-1',
    nodes: ['block-1', 'block-2'],
    iterations: 3,
    loopType: 'for',
    ...overrides,
  })

  // Factory: a 'forEach' loop over the supplied items (array, object, or JSON string).
  const createForEachLoop = (items: any, overrides?: Partial<SerializedLoop>): SerializedLoop => ({
    id: 'loop-1',
    nodes: ['block-1', 'block-2'],
    iterations: 5,
    loopType: 'forEach',
    forEachItems: items,
    ...overrides,
  })

  // Factory: a serialized workflow shaped as
  //   starter -> loop-1 -> (block-1 -> block-2 -> back to loop-1) -> after-loop
  // with the given loop registered under 'loop-1'.
  const createWorkflowWithLoop = (loop: SerializedLoop): SerializedWorkflow => ({
    version: '2.0',
    blocks: [
      {
        id: 'starter',
        position: { x: 0, y: 0 },
        metadata: { id: BlockType.STARTER, name: 'Start' },
        config: { tool: BlockType.STARTER, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'loop-1',
        position: { x: 100, y: 0 },
        metadata: { id: BlockType.LOOP, name: 'Test Loop' },
        config: { tool: BlockType.LOOP, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'block-1',
        position: { x: 200, y: 0 },
        metadata: { id: BlockType.FUNCTION, name: 'Block 1' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'block-2',
        position: { x: 300, y: 0 },
        metadata: { id: BlockType.FUNCTION, name: 'Block 2' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'after-loop',
        position: { x: 400, y: 0 },
        metadata: { id: BlockType.FUNCTION, name: 'After Loop' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
    ],
    connections: [
      { source: 'starter', target: 'loop-1' },
      { source: 'loop-1', target: 'block-1', sourceHandle: 'loop-start-source' },
      { source: 'block-1', target: 'block-2' },
      { source: 'block-2', target: 'loop-1' },
      { source: 'loop-1', target: 'after-loop', sourceHandle: 'loop-end-source' },
    ],
    loops: {
      'loop-1': loop,
    },
    parallels: {},
  })

  // Fresh manager + context per test: one basic loop, zero iterations completed.
  beforeEach(() => {
    const loops = {
      'loop-1': createBasicLoop(),
    }
    manager = new LoopManager(loops)
    mockContext = createMockContext({
      workflow: createWorkflowWithLoop(createBasicLoop()),
      loopIterations: new Map([['loop-1', 0]]),
      loopItems: new Map(),
      executedBlocks: new Set(),
      activeExecutionPath: new Set(['starter', 'loop-1']),
      completedLoops: new Set(),
    })
  })

  describe('constructor', () => {
    test('should initialize with provided loops', () => {
      const loops = {
        'loop-1': createBasicLoop(),
        'loop-2': createBasicLoop({ id: 'loop-2', iterations: 5 }),
      }
      const loopManager = new LoopManager(loops)
      expect(loopManager.getIterations('loop-1')).toBe(3)
      expect(loopManager.getIterations('loop-2')).toBe(5)
    })

    test('should use default iterations for unknown loops', () => {
      const loopManager = new LoopManager({})
      expect(loopManager.getIterations('unknown-loop')).toBe(5) // default
    })

    test('should accept custom default iterations', () => {
      // Second constructor argument overrides the fallback iteration count.
      const loopManager = new LoopManager({}, 10)
      expect(loopManager.getIterations('unknown-loop')).toBe(10)
    })
  })

  describe('processLoopIterations', () => {
    test('should return false when no loops exist', async () => {
      const emptyManager = new LoopManager({})
      const result = await emptyManager.processLoopIterations(mockContext)
      expect(result).toBe(false)
    })

    test('should skip loops that are already completed', async () => {
      mockContext.completedLoops.add('loop-1')
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(false)
    })

    test('should skip loops where loop block has not been executed', async () => {
      // Loop block not in executed blocks
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(false)
    })

    test('should skip loops where not all blocks have been executed', async () => {
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      // block-2 not executed yet
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(false)
    })

    test('should reset blocks and continue iteration when not at max iterations', async () => {
      // Set up as if we've completed one iteration
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 1) // First iteration completed
      // Add some block states to verify they get reset
      mockContext.blockStates.set('block-1', {
        output: { result: 'test' },
        executed: true,
        executionTime: 100,
      })
      mockContext.blockStates.set('block-2', {
        output: { result: 'test2' },
        executed: true,
        executionTime: 200,
      })
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(false) // Not at max iterations yet
      // Verify blocks were reset
      expect(mockContext.executedBlocks.has('block-1')).toBe(false)
      expect(mockContext.executedBlocks.has('block-2')).toBe(false)
      expect(mockContext.executedBlocks.has('loop-1')).toBe(false) // Loop block also reset
      // Verify block states were cleared
      expect(mockContext.blockStates.has('block-1')).toBe(false)
      expect(mockContext.blockStates.has('block-2')).toBe(false)
      expect(mockContext.blockStates.has('loop-1')).toBe(false)
      // Verify blocks were removed from active execution path
      expect(mockContext.activeExecutionPath.has('block-1')).toBe(false)
      expect(mockContext.activeExecutionPath.has('block-2')).toBe(false)
    })

    test('should complete loop and activate end connections when max iterations reached', async () => {
      // Set up as if we've completed all iterations
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 3) // Max iterations reached
      // Set up loop execution state with some results
      mockContext.loopExecutions = new Map()
      mockContext.loopExecutions.set('loop-1', {
        maxIterations: 3,
        loopType: 'for',
        forEachItems: null,
        executionResults: new Map([
          ['iteration_0', { iteration: { 'block-1': { result: 'result1' } } }],
          ['iteration_1', { iteration: { 'block-1': { result: 'result2' } } }],
          ['iteration_2', { iteration: { 'block-1': { result: 'result3' } } }],
        ]),
        currentIteration: 3,
      })
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(true) // Loop reached max iterations
      // Verify loop was marked as completed
      expect(mockContext.completedLoops.has('loop-1')).toBe(true)
      // Verify loop block state was updated with aggregated results
      const loopBlockState = mockContext.blockStates.get('loop-1')
      expect(loopBlockState).toBeDefined()
      expect(loopBlockState?.output.completed).toBe(true)
      expect(loopBlockState?.output.results).toHaveLength(3)
      // Verify end connection was activated
      expect(mockContext.activeExecutionPath.has('after-loop')).toBe(true)
    })

    test('should handle forEach loops with array items', async () => {
      const forEachLoop = createForEachLoop(['item1', 'item2', 'item3'])
      manager = new LoopManager({ 'loop-1': forEachLoop })
      mockContext.workflow!.loops['loop-1'] = forEachLoop
      // Set up as if we've completed all iterations
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 3) // All items processed
      // Store items in context as the loop handler would
      mockContext.loopItems.set('loop-1_items', ['item1', 'item2', 'item3'])
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(true) // Loop completed
      expect(mockContext.completedLoops.has('loop-1')).toBe(true)
      const loopBlockState = mockContext.blockStates.get('loop-1')
      expect(loopBlockState?.output.loopType).toBe('forEach')
      expect(loopBlockState?.output.maxIterations).toBe(3)
    })

    test('should handle forEach loops with object items', async () => {
      // For object items, max iterations is the number of entries (2 here).
      const items = { key1: 'value1', key2: 'value2' }
      const forEachLoop = createForEachLoop(items)
      manager = new LoopManager({ 'loop-1': forEachLoop })
      mockContext.workflow!.loops['loop-1'] = forEachLoop
      // Set up as if we've completed all iterations
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 2) // All items processed
      // Store items in context as the loop handler would
      mockContext.loopItems.set('loop-1_items', items)
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(true) // Loop completed
      expect(mockContext.completedLoops.has('loop-1')).toBe(true)
      const loopBlockState = mockContext.blockStates.get('loop-1')
      expect(loopBlockState?.output.maxIterations).toBe(2)
    })

    test('should handle forEach loops with string items', async () => {
      const forEachLoop = createForEachLoop('["a", "b", "c"]') // JSON string
      manager = new LoopManager({ 'loop-1': forEachLoop })
      mockContext.workflow!.loops['loop-1'] = forEachLoop
      // Set up as if we've completed all iterations
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 3) // All items processed
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(true) // Loop completed
      expect(mockContext.completedLoops.has('loop-1')).toBe(true)
    })
  })

  describe('storeIterationResult', () => {
    test('should create new loop state if none exists', () => {
      const output = { result: 'test result' }
      manager.storeIterationResult(mockContext, 'loop-1', 0, output)
      expect(mockContext.loopExecutions).toBeDefined()
      const loopState = mockContext.loopExecutions!.get('loop-1')
      expect(loopState).toBeDefined()
      expect(loopState?.maxIterations).toBe(3)
      expect(loopState?.loopType).toBe('for')
      expect(loopState?.executionResults.get('iteration_0')).toEqual(output)
    })

    test('should add to existing loop state', () => {
      // Initialize loop state
      mockContext.loopExecutions = new Map()
      mockContext.loopExecutions.set('loop-1', {
        maxIterations: 3,
        loopType: 'for',
        forEachItems: null,
        executionResults: new Map(),
        currentIteration: 0,
      })
      const output1 = { result: 'result1' }
      const output2 = { result: 'result2' }
      manager.storeIterationResult(mockContext, 'loop-1', 0, output1)
      manager.storeIterationResult(mockContext, 'loop-1', 0, output2)
      const loopState = mockContext.loopExecutions.get('loop-1')
      const iterationResults = loopState?.executionResults.get('iteration_0')
      // When multiple results are stored for the same iteration, they are combined into an array
      expect(iterationResults).toEqual([output1, output2])
    })

    test('should handle forEach loop state creation', () => {
      const forEachLoop = createForEachLoop(['item1', 'item2'])
      manager = new LoopManager({ 'loop-1': forEachLoop })
      const output = { result: 'test result' }
      manager.storeIterationResult(mockContext, 'loop-1', 0, output)
      const loopState = mockContext.loopExecutions!.get('loop-1')
      expect(loopState?.loopType).toBe('forEach')
      expect(loopState?.forEachItems).toEqual(['item1', 'item2'])
    })
  })

  describe('getLoopIndex', () => {
    test('should return current iteration for existing loop', () => {
      mockContext.loopIterations.set('loop-1', 2)
      const index = manager.getLoopIndex('loop-1', 'block-1', mockContext)
      expect(index).toBe(2)
    })

    test('should return 0 for non-existent loop iteration', () => {
      const index = manager.getLoopIndex('non-existent', 'block-1', mockContext)
      expect(index).toBe(0)
    })

    test('should return 0 for unknown loop', () => {
      const unknownManager = new LoopManager({})
      const index = unknownManager.getLoopIndex('unknown', 'block-1', mockContext)
      expect(index).toBe(0)
    })
  })

  describe('getIterations', () => {
    test('should return iterations for existing loop', () => {
      expect(manager.getIterations('loop-1')).toBe(3)
    })

    test('should return default iterations for non-existent loop', () => {
      expect(manager.getIterations('non-existent')).toBe(5) // default
    })
  })

  describe('getCurrentItem', () => {
    test('should return current item for loop', () => {
      mockContext.loopItems.set('loop-1', ['current-item'])
      const item = manager.getCurrentItem('loop-1', mockContext)
      expect(item).toEqual(['current-item'])
    })

    test('should return undefined for non-existent loop item', () => {
      const item = manager.getCurrentItem('non-existent', mockContext)
      expect(item).toBeUndefined()
    })
  })

  // Exercises the private allBlocksExecuted logic indirectly via
  // processLoopIterations, covering router/condition decisions and error paths.
  describe('allBlocksExecuted (private method testing through processLoopIterations)', () => {
    test('should handle router blocks with selected paths', async () => {
      // Create a workflow with a router block inside the loop
      const workflow = createWorkflowWithLoop(createBasicLoop())
      workflow.blocks[2].metadata!.id = BlockType.ROUTER // Make block-1 a router
      workflow.connections = [
        { source: 'starter', target: 'loop-1' },
        { source: 'loop-1', target: 'block-1', sourceHandle: 'loop-start-source' },
        { source: 'block-1', target: 'block-2' }, // Router selects block-2
        { source: 'block-1', target: 'alternative-block' }, // Alternative path
        { source: 'block-2', target: 'loop-1' },
        { source: 'loop-1', target: 'after-loop', sourceHandle: 'loop-end-source' },
      ]
      mockContext.workflow = workflow
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.decisions.router.set('block-1', 'block-2') // Router selected block-2
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should process the iteration since all reachable blocks are executed
      expect(result).toBe(false) // Not at max iterations yet
    })

    test('should handle condition blocks with selected paths', async () => {
      // Create a workflow with a condition block inside the loop
      const workflow = createWorkflowWithLoop(createBasicLoop())
      workflow.blocks[2].metadata!.id = BlockType.CONDITION // Make block-1 a condition
      workflow.connections = [
        { source: 'starter', target: 'loop-1' },
        { source: 'loop-1', target: 'block-1', sourceHandle: 'loop-start-source' },
        { source: 'block-1', target: 'block-2', sourceHandle: 'condition-true' },
        { source: 'block-1', target: 'alternative-block', sourceHandle: 'condition-false' },
        { source: 'block-2', target: 'loop-1' },
        { source: 'loop-1', target: 'after-loop', sourceHandle: 'loop-end-source' },
      ]
      mockContext.workflow = workflow
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.decisions.condition.set('block-1', 'true') // Condition selected true path
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should process the iteration since all reachable blocks are executed
      expect(result).toBe(false) // Not at max iterations yet
    })

    test('should handle error connections properly', async () => {
      // Create a workflow with error handling inside the loop
      const workflow = createWorkflowWithLoop(createBasicLoop())
      workflow.connections = [
        { source: 'starter', target: 'loop-1' },
        { source: 'loop-1', target: 'block-1', sourceHandle: 'loop-start-source' },
        { source: 'block-1', target: 'block-2', sourceHandle: 'source' },
        { source: 'block-1', target: 'error-handler', sourceHandle: 'error' },
        { source: 'block-2', target: 'loop-1' },
        { source: 'loop-1', target: 'after-loop', sourceHandle: 'loop-end-source' },
      ]
      mockContext.workflow = workflow
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      // Set block-1 to have no error (successful execution)
      mockContext.blockStates.set('block-1', {
        output: { result: 'success' },
        executed: true,
        executionTime: 100,
      })
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should process the iteration since the success path was followed
      expect(result).toBe(false) // Not at max iterations yet
    })

    test('should handle blocks with errors following error paths', async () => {
      // Create a workflow with error handling inside the loop
      const workflow = createWorkflowWithLoop(createBasicLoop())
      workflow.blocks.push({
        id: 'error-handler',
        position: { x: 350, y: 100 },
        metadata: { id: BlockType.FUNCTION, name: 'Error Handler' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      })
      workflow.loops['loop-1'].nodes.push('error-handler')
      workflow.connections = [
        { source: 'starter', target: 'loop-1' },
        { source: 'loop-1', target: 'block-1', sourceHandle: 'loop-start-source' },
        { source: 'block-1', target: 'block-2', sourceHandle: 'source' },
        { source: 'block-1', target: 'error-handler', sourceHandle: 'error' },
        { source: 'error-handler', target: 'loop-1' },
        { source: 'block-2', target: 'loop-1' },
        { source: 'loop-1', target: 'after-loop', sourceHandle: 'loop-end-source' },
      ]
      mockContext.workflow = workflow
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('error-handler')
      // Set block-1 to have an error
      mockContext.blockStates.set('block-1', {
        output: {
          error: 'Something went wrong',
        },
        executed: true,
        executionTime: 100,
      })
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should process the iteration since the error path was followed
      expect(result).toBe(false) // Not at max iterations yet
    })
  })

  describe('edge cases and error handling', () => {
    test('should handle empty loop nodes array', async () => {
      const emptyLoop = createBasicLoop({ nodes: [] })
      manager = new LoopManager({ 'loop-1': emptyLoop })
      mockContext.workflow!.loops['loop-1'] = emptyLoop
      mockContext.executedBlocks.add('loop-1')
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should complete immediately since there are no blocks to execute
      expect(result).toBe(false)
    })

    test('should handle missing workflow in context', async () => {
      mockContext.workflow = undefined
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(false)
    })

    test('should handle missing loop configuration', async () => {
      // Remove loop from workflow
      if (mockContext.workflow) {
        mockContext.workflow.loops = {}
      }
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should skip processing since loop config is missing
      expect(result).toBe(false)
    })

    test('should handle forEach loop with invalid JSON string', async () => {
      const forEachLoop = createForEachLoop('invalid json')
      manager = new LoopManager({ 'loop-1': forEachLoop })
      mockContext.workflow!.loops['loop-1'] = forEachLoop
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should handle gracefully and use default iterations
      expect(result).toBe(false)
    })

    test('should handle forEach loop with null items', async () => {
      const forEachLoop = createForEachLoop(null)
      manager = new LoopManager({ 'loop-1': forEachLoop })
      mockContext.workflow!.loops['loop-1'] = forEachLoop
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.loopIterations.set('loop-1', 1)
      const result = await manager.processLoopIterations(mockContext)
      // Should handle gracefully
      expect(result).toBe(false)
    })
  })

  describe('integration scenarios', () => {
    test('should handle multiple loops in workflow', async () => {
      const loops = {
        'loop-1': createBasicLoop({ iterations: 2 }),
        'loop-2': createBasicLoop({ id: 'loop-2', nodes: ['block-3'], iterations: 3 }),
      }
      manager = new LoopManager(loops)
      // Set up context for both loops
      mockContext.loopIterations.set('loop-1', 2) // loop-1 at max
      mockContext.loopIterations.set('loop-2', 1) // loop-2 not at max
      mockContext.executedBlocks.add('loop-1')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      // Set up loop execution states
      mockContext.loopExecutions = new Map()
      mockContext.loopExecutions.set('loop-1', {
        maxIterations: 2,
        loopType: 'for',
        forEachItems: null,
        executionResults: new Map([
          ['iteration_0', { iteration: { 'block-1': { result: 'result1' } } }],
          ['iteration_1', { iteration: { 'block-1': { result: 'result2' } } }],
        ]),
        currentIteration: 2,
      })
      const result = await manager.processLoopIterations(mockContext)
      expect(result).toBe(true) // loop-1 reached max iterations
      expect(mockContext.completedLoops.has('loop-1')).toBe(true)
      expect(mockContext.completedLoops.has('loop-2')).toBe(false)
    })

    test('should handle nested loop scenarios (loop inside another loop)', async () => {
      // This tests the scenario where a loop block might be inside another loop
      const outerLoop = createBasicLoop({
        id: 'outer-loop',
        nodes: ['inner-loop', 'block-1'],
        iterations: 2,
      })
      const innerLoop = createBasicLoop({
        id: 'inner-loop',
        nodes: ['block-2'],
        iterations: 3,
      })
      const loops = {
        'outer-loop': outerLoop,
        'inner-loop': innerLoop,
      }
      manager = new LoopManager(loops)
      // Set up context - inner loop completed, outer loop still running
      mockContext.loopIterations.set('outer-loop', 1)
      mockContext.loopIterations.set('inner-loop', 3)
      mockContext.executedBlocks.add('outer-loop')
      mockContext.executedBlocks.add('inner-loop')
      mockContext.executedBlocks.add('block-1')
      mockContext.executedBlocks.add('block-2')
      mockContext.completedLoops.add('inner-loop')
      const result = await manager.processLoopIterations(mockContext)
      // Should reset outer loop for next iteration
      expect(result).toBe(false)
      expect(mockContext.executedBlocks.has('inner-loop')).toBe(false)
      expect(mockContext.executedBlocks.has('block-1')).toBe(false)
    })
  })
})

View File

@@ -1,439 +0,0 @@
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType } from '@/executor/consts'
import type { ExecutionContext } from '@/executor/types'
import { ConnectionUtils } from '@/executor/utils/connections'
import type { SerializedBlock, SerializedConnection, SerializedLoop } from '@/serializer/types'
const logger = createLogger('LoopManager')
/**
 * Manages loop detection, iteration limits, and state resets.
 * With the new loop block approach, this class is significantly simplified:
 * each loop is represented by a dedicated loop block plus the set of node IDs
 * it contains, and this manager decides when an iteration is complete and
 * whether the loop should iterate again or finish.
 */
export class LoopManager {
  constructor(
    private loops: Record<string, SerializedLoop>,
    private defaultIterations = 5
  ) {}

  /**
   * Processes all loops and checks if any need to be iterated.
   * This is called after each execution layer to handle loop iterations.
   *
   * @param context - Current execution context
   * @returns Whether any loop has reached its maximum iterations
   */
  async processLoopIterations(context: ExecutionContext): Promise<boolean> {
    let hasLoopReachedMaxIterations = false
    // Nothing to do if no loops
    if (Object.keys(this.loops).length === 0) return hasLoopReachedMaxIterations
    // Check each loop to see if it should iterate
    for (const [loopId, loop] of Object.entries(this.loops)) {
      // Skip if this loop has already been marked as completed
      if (context.completedLoops.has(loopId)) {
        continue
      }
      // The loop block itself must have executed before we process iterations
      const loopBlockExecuted = context.executedBlocks.has(loopId)
      if (!loopBlockExecuted) {
        continue
      }
      // Check if all (reachable) blocks in the loop have been executed
      const allBlocksInLoopExecuted = this.allBlocksExecuted(loop.nodes, context)
      if (allBlocksInLoopExecuted) {
        // NOTE: a missing counter defaults to 1 — iterations are 1-based here
        const currentIteration = context.loopIterations.get(loopId) || 1
        // Results are stored individually as blocks execute (like parallels);
        // no bulk collection is needed here. The loop block increments the
        // iteration when it next executes — we only reset blocks so they can
        // run again.
        let maxIterations = loop.iterations || this.defaultIterations
        if (loop.loopType === 'forEach' && loop.forEachItems) {
          // Prefer items already evaluated and stored by the loop handler
          const storedItems = context.loopItems.get(`${loopId}_items`)
          if (storedItems) {
            const itemsLength = Array.isArray(storedItems)
              ? storedItems.length
              : Object.keys(storedItems).length
            maxIterations = itemsLength
            logger.info(
              `forEach loop ${loopId} - Items: ${itemsLength}, Max iterations: ${maxIterations}`
            )
          } else {
            // Fall back to parsing the raw forEachItems config value
            const itemsLength = this.getItemsLength(loop.forEachItems)
            if (itemsLength > 0) {
              maxIterations = itemsLength
              logger.info(
                `forEach loop ${loopId} - Parsed items: ${itemsLength}, Max iterations: ${maxIterations}`
              )
            }
          }
        } else if (loop.loopType === 'while' || loop.loopType === 'doWhile') {
          // while/doWhile loops have no max iteration limit; they rely on the
          // condition to stop (and the workflow timeout as a safety net)
          maxIterations = Number.MAX_SAFE_INTEGER
        }
        logger.info(`Loop ${loopId} - Current: ${currentIteration}, Max: ${maxIterations}`)
        // Check if we've completed all iterations (only for for/forEach loops)
        if (
          currentIteration >= maxIterations &&
          (loop.loopType === 'for' || loop.loopType === 'forEach')
        ) {
          hasLoopReachedMaxIterations = true
          logger.info(`Loop ${loopId} has completed all ${maxIterations} iterations`)
          // Collect every stored per-iteration result, in iteration order
          const results = []
          const loopState = context.loopExecutions?.get(loopId)
          if (loopState) {
            for (let i = 0; i < maxIterations; i++) {
              const result = loopState.executionResults.get(`iteration_${i}`)
              if (result) {
                results.push(result)
              }
            }
          }
          const aggregatedOutput = {
            loopId,
            currentIteration: maxIterations - 1, // Last iteration index
            maxIterations,
            loopType: loop.loopType || 'for',
            completed: true,
            results,
            message: `Completed all ${maxIterations} iterations`,
          }
          context.blockStates.set(loopId, {
            output: aggregatedOutput,
            executed: true,
            executionTime: 0,
          })
          context.completedLoops.add(loopId)
          // Activate all post-loop paths wired to the loop-end handle
          const loopEndConnections =
            context.workflow?.connections.filter(
              (conn) => conn.source === loopId && conn.sourceHandle === 'loop-end-source'
            ) || []
          for (const conn of loopEndConnections) {
            context.activeExecutionPath.add(conn.target)
            logger.info(`Activated post-loop path from ${loopId} to ${conn.target}`)
          }
          logger.info(`Loop ${loopId} - Completed and activated end connections`)
        } else {
          if (loop.loopType === 'while' || loop.loopType === 'doWhile') {
            // Don't reset inner blocks yet — the loop handler re-evaluates the
            // condition first and decides whether to continue or exit. Only
            // reset the loop block itself so the condition can be re-checked.
            context.executedBlocks.delete(loopId)
            context.blockStates.delete(loopId)
          } else {
            // for/forEach loops: increment the counter and reset everything
            context.loopIterations.set(loopId, currentIteration + 1)
            this.resetLoopBlocks(loopId, loop, context)
            context.executedBlocks.delete(loopId)
            context.blockStates.delete(loopId)
          }
        }
      }
    }
    return hasLoopReachedMaxIterations
  }

  /**
   * Checks if all reachable blocks in a loop have been executed.
   * Completely unconnected blocks are excluded from consideration so they
   * don't prevent loop completion.
   *
   * @param nodeIds - All node IDs in the loop
   * @param context - Execution context
   * @returns Whether all reachable blocks have been executed
   */
  private allBlocksExecuted(nodeIds: string[], context: ExecutionContext): boolean {
    return this.allReachableBlocksExecuted(nodeIds, context)
  }

  /**
   * Helper method to check if all reachable blocks have been executed.
   * Traverses the loop's internal connection graph from its entry points,
   * following only the selected branch of router/condition blocks and the
   * error/success edge that matches each block's error state.
   */
  private allReachableBlocksExecuted(nodeIds: string[], context: ExecutionContext): boolean {
    // Get all connections whose endpoints are both inside the loop
    const loopConnections =
      context.workflow?.connections.filter(
        (conn) => nodeIds.includes(conn.source) && nodeIds.includes(conn.target)
      ) || []
    // Build a map of blocks to their outgoing connections within the loop
    const blockOutgoingConnections = new Map<string, typeof loopConnections>()
    for (const nodeId of nodeIds) {
      const outgoingConnections = ConnectionUtils.getOutgoingConnections(nodeId, loopConnections)
      blockOutgoingConnections.set(nodeId, outgoingConnections)
    }
    // Entry points: blocks with no incoming connections inside the loop but
    // with external connections into the loop
    const entryBlocks = nodeIds.filter((nodeId) =>
      ConnectionUtils.isEntryPoint(nodeId, nodeIds, context.workflow?.connections || [])
    )
    // Track which blocks we've visited and determined are reachable
    const reachableBlocks = new Set<string>()
    const toVisit = [...entryBlocks]
    // Traverse the graph to find all reachable blocks
    while (toVisit.length > 0) {
      const currentBlockId = toVisit.shift()!
      if (reachableBlocks.has(currentBlockId)) continue
      reachableBlocks.add(currentBlockId)
      const block = context.workflow?.blocks.find((b) => b.id === currentBlockId)
      if (!block) continue
      const outgoing = blockOutgoingConnections.get(currentBlockId) || []
      if (block.metadata?.id === BlockType.ROUTER) {
        // Router blocks: only follow the selected path
        const selectedTarget = context.decisions.router.get(currentBlockId)
        if (selectedTarget && nodeIds.includes(selectedTarget)) {
          toVisit.push(selectedTarget)
        }
      } else if (block.metadata?.id === BlockType.CONDITION) {
        // Condition blocks: only follow the selected condition path
        const selectedConditionId = context.decisions.condition.get(currentBlockId)
        if (selectedConditionId) {
          const selectedConnection = outgoing.find(
            (conn) => conn.sourceHandle === `condition-${selectedConditionId}`
          )
          if (selectedConnection?.target) {
            toVisit.push(selectedConnection.target)
          }
        }
      } else {
        // Regular blocks: follow the edge matching the block's error state
        this.handleErrorConnections(currentBlockId, outgoing, context, toVisit)
      }
    }
    // Now check if all reachable blocks have been executed
    for (const reachableBlockId of reachableBlocks) {
      if (!context.executedBlocks.has(reachableBlockId)) {
        logger.info(
          `Loop iteration not complete - block ${reachableBlockId} is reachable but not executed`
        )
        return false
      }
    }
    logger.info(
      `All reachable blocks in loop have been executed. Reachable: ${Array.from(reachableBlocks).join(', ')}`
    )
    return true
  }

  /**
   * Helper to get the number of items for forEach loops.
   * Accepts an array, a plain object (keys counted), or a JSON string of
   * either; anything else — including unparseable JSON — yields 0.
   */
  private getItemsLength(forEachItems: unknown): number {
    if (Array.isArray(forEachItems)) {
      return forEachItems.length
    }
    if (typeof forEachItems === 'object' && forEachItems !== null) {
      return Object.keys(forEachItems).length
    }
    if (typeof forEachItems === 'string') {
      try {
        const parsed = JSON.parse(forEachItems)
        if (Array.isArray(parsed)) {
          return parsed.length
        }
        if (typeof parsed === 'object' && parsed !== null) {
          return Object.keys(parsed).length
        }
      } catch {}
    }
    return 0
  }

  /**
   * Resets all blocks within a loop for the next iteration, clearing their
   * executed/output state, active-path membership, and routing decisions.
   *
   * @param loopId - ID of the loop
   * @param loop - The loop configuration
   * @param context - Current execution context
   */
  private resetLoopBlocks(loopId: string, loop: SerializedLoop, context: ExecutionContext): void {
    for (const nodeId of loop.nodes) {
      context.executedBlocks.delete(nodeId)
      context.blockStates.delete(nodeId)
      context.activeExecutionPath.delete(nodeId)
      context.decisions.router.delete(nodeId)
      context.decisions.condition.delete(nodeId)
    }
  }

  /**
   * Stores the result of a loop iteration, lazily creating the per-loop
   * execution state. Multiple results for the same iteration are accumulated
   * into an array.
   *
   * @param context - Current execution context
   * @param loopId - ID of the loop
   * @param iterationIndex - Zero-based iteration index
   * @param output - Block output to record for this iteration
   */
  storeIterationResult(
    context: ExecutionContext,
    loopId: string,
    iterationIndex: number,
    output: any
  ): void {
    if (!context.loopExecutions) {
      context.loopExecutions = new Map()
    }
    let loopState = context.loopExecutions.get(loopId)
    if (!loopState) {
      const loop = this.loops[loopId]
      const loopType = loop?.loopType === 'forEach' ? 'forEach' : 'for'
      const forEachItems = loop?.forEachItems
      loopState = {
        maxIterations: loop?.iterations || this.defaultIterations,
        loopType,
        forEachItems:
          Array.isArray(forEachItems) || (typeof forEachItems === 'object' && forEachItems !== null)
            ? forEachItems
            : null,
        executionResults: new Map(),
        currentIteration: 0,
      }
      context.loopExecutions.set(loopId, loopState)
    }
    const iterationKey = `iteration_${iterationIndex}`
    const existingResult = loopState.executionResults.get(iterationKey)
    if (existingResult) {
      if (Array.isArray(existingResult)) {
        existingResult.push(output)
      } else {
        loopState.executionResults.set(iterationKey, [existingResult, output])
      }
    } else {
      loopState.executionResults.set(iterationKey, output)
    }
  }

  /**
   * Gets the correct loop index based on the current block being executed.
   *
   * @param loopId - ID of the loop
   * @param blockId - ID of the block requesting the index (currently unused;
   *   kept for interface stability)
   * @param context - Current execution context
   * @returns The correct loop index for this block (0 if the loop is unknown)
   */
  getLoopIndex(loopId: string, blockId: string, context: ExecutionContext): number {
    const loop = this.loops[loopId]
    if (!loop) return 0
    // Return the current iteration counter
    return context.loopIterations.get(loopId) || 0
  }

  /**
   * Gets the configured iterations for a loop.
   *
   * @param loopId - ID of the loop
   * @returns Iterations for the loop, or the default when unset/unknown
   */
  getIterations(loopId: string): number {
    return this.loops[loopId]?.iterations || this.defaultIterations
  }

  /**
   * Gets the current item for a forEach loop.
   *
   * @param loopId - ID of the loop
   * @param context - Current execution context
   * @returns Current item in the loop iteration
   */
  getCurrentItem(loopId: string, context: ExecutionContext): any {
    return context.loopItems.get(loopId)
  }

  /**
   * Checks if a connection forms a feedback path in a loop.
   * With loop blocks, a feedback path is a connection from a block inside a
   * loop back to that loop's block itself.
   *
   * @param connection - Connection to check
   * @param blocks - All blocks in the workflow (unused; kept for interface
   *   stability)
   * @returns Whether the connection forms a feedback path
   */
  isFeedbackPath(connection: SerializedConnection, blocks: SerializedBlock[]): boolean {
    for (const [loopId, loop] of Object.entries(this.loops)) {
      // Use Set for O(1) lookup performance instead of O(n) includes()
      const loopNodesSet = new Set(loop.nodes)
      // Check if source is inside the loop and target is the loop block
      if (loopNodesSet.has(connection.source) && connection.target === loopId) {
        return true
      }
    }
    return false
  }

  /**
   * Handles error connections and follows appropriate paths based on whether
   * the block's recorded output contains an error.
   *
   * @param blockId - ID of the block to check for error handling
   * @param outgoing - Outgoing connections from the block
   * @param context - Current execution context
   * @param toVisit - Array to add next blocks to visit (mutated in place)
   */
  private handleErrorConnections(
    blockId: string,
    outgoing: SerializedConnection[],
    context: ExecutionContext,
    toVisit: string[]
  ): void {
    const blockState = context.blockStates.get(blockId)
    const hasError = blockState?.output?.error !== undefined
    // Follow the error edge on failure, the plain/default edge on success
    for (const conn of outgoing) {
      if (conn.sourceHandle === 'error' && hasError) {
        toVisit.push(conn.target)
      } else if ((conn.sourceHandle === 'source' || !conn.sourceHandle) && !hasError) {
        toVisit.push(conn.target)
      }
    }
  }
}

View File

@@ -0,0 +1,381 @@
import { createLogger } from '@/lib/logs/console/logger'
import { buildLoopIndexCondition, DEFAULTS, EDGE } from '@/executor/consts'
import type { ExecutionContext, NormalizedBlockOutput } from '@/executor/types'
import type { LoopConfigWithNodes } from '@/executor/types/loop'
import {
buildSentinelEndId,
buildSentinelStartId,
extractBaseBlockId,
} from '@/executor/utils/subflow-utils'
import type { SerializedLoop } from '@/serializer/types'
import type { DAG } from '../dag/builder'
import type { ExecutionState, LoopScope } from '../execution/state'
import type { VariableResolver } from '../variables/resolver'
const logger = createLogger('LoopOrchestrator')
/** The edge a loop sentinel selects: continue into another iteration or exit. */
export type LoopRoute = typeof EDGE.LOOP_CONTINUE | typeof EDGE.LOOP_EXIT
/**
 * Outcome of evaluating whether a loop should run another iteration.
 * `aggregatedResults` and `currentIteration` are populated on exit paths.
 */
export interface LoopContinuationResult {
shouldContinue: boolean
shouldExit: boolean
selectedRoute: LoopRoute
aggregatedResults?: NormalizedBlockOutput[][]
currentIteration?: number
}
/**
 * Orchestrates loop subflows in the DAG executor: initializes per-loop scopes,
 * records per-iteration node outputs, evaluates continuation conditions at the
 * loop's end sentinel, and resets execution state/edges between iterations.
 */
export class LoopOrchestrator {
  constructor(
    private dag: DAG,
    private state: ExecutionState,
    private resolver: VariableResolver
  ) {}

  /**
   * Creates and registers the LoopScope for a loop before its first iteration.
   *
   * @param ctx - Execution context used to resolve forEach items
   * @param loopId - ID of the loop block
   * @returns The newly created scope (also stored on execution state)
   * @throws When the loop config is missing or the loop type is unknown
   */
  initializeLoopScope(ctx: ExecutionContext, loopId: string): LoopScope {
    const loopConfig = this.dag.loopConfigs.get(loopId) as SerializedLoop | undefined
    if (!loopConfig) {
      throw new Error(`Loop config not found: ${loopId}`)
    }
    const scope: LoopScope = {
      iteration: 0,
      currentIterationOutputs: new Map(),
      allIterationOutputs: [],
    }
    const loopType = loopConfig.loopType
    logger.debug('Initializing loop scope', { loopId, loopType })
    switch (loopType) {
      case 'for':
        scope.maxIterations = loopConfig.iterations || DEFAULTS.MAX_LOOP_ITERATIONS
        scope.condition = buildLoopIndexCondition(scope.maxIterations)
        logger.debug('For loop initialized', { loopId, maxIterations: scope.maxIterations })
        break
      case 'forEach': {
        const items = this.resolveForEachItems(ctx, loopConfig.forEachItems)
        scope.items = items
        scope.maxIterations = items.length
        scope.item = items[0]
        scope.condition = buildLoopIndexCondition(scope.maxIterations)
        logger.debug('ForEach loop initialized', { loopId, itemCount: items.length })
        break
      }
      case 'while':
        scope.condition = loopConfig.whileCondition
        logger.debug('While loop initialized', { loopId, condition: scope.condition })
        break
      case 'doWhile':
        // doWhile: run the body once before the first condition check; an
        // explicit condition wins over an iteration cap
        if (loopConfig.doWhileCondition) {
          scope.condition = loopConfig.doWhileCondition
        } else {
          scope.maxIterations = loopConfig.iterations || DEFAULTS.MAX_LOOP_ITERATIONS
          scope.condition = buildLoopIndexCondition(scope.maxIterations)
        }
        scope.skipFirstConditionCheck = true
        logger.debug('DoWhile loop initialized', { loopId, condition: scope.condition })
        break
      default:
        throw new Error(`Unknown loop type: ${loopType}`)
    }
    this.state.setLoopScope(loopId, scope)
    return scope
  }

  /**
   * Records a node's output under the current iteration of its loop.
   * Virtual node IDs are reduced to their base block ID first.
   */
  storeLoopNodeOutput(
    ctx: ExecutionContext,
    loopId: string,
    nodeId: string,
    output: NormalizedBlockOutput
  ): void {
    const scope = this.state.getLoopScope(loopId)
    if (!scope) {
      logger.warn('Loop scope not found for node output storage', { loopId, nodeId })
      return
    }
    const baseId = extractBaseBlockId(nodeId)
    scope.currentIterationOutputs.set(baseId, output)
    logger.debug('Stored loop node output', {
      loopId,
      nodeId: baseId,
      iteration: scope.iteration,
      outputsCount: scope.currentIterationOutputs.size,
    })
  }

  /**
   * Called at the loop's end sentinel: collects this iteration's outputs and
   * decides whether another iteration should run.
   *
   * @returns Continuation result; on exit it carries the aggregated results
   */
  evaluateLoopContinuation(ctx: ExecutionContext, loopId: string): LoopContinuationResult {
    const scope = this.state.getLoopScope(loopId)
    if (!scope) {
      logger.error('Loop scope not found during continuation evaluation', { loopId })
      return {
        shouldContinue: false,
        shouldExit: true,
        selectedRoute: EDGE.LOOP_EXIT,
      }
    }
    // Fold the just-finished iteration's outputs into the aggregate
    const iterationResults: NormalizedBlockOutput[] = []
    for (const blockOutput of scope.currentIterationOutputs.values()) {
      iterationResults.push(blockOutput)
    }
    if (iterationResults.length > 0) {
      scope.allIterationOutputs.push(iterationResults)
      logger.debug('Collected iteration results', {
        loopId,
        iteration: scope.iteration,
        resultsCount: iterationResults.length,
      })
    }
    scope.currentIterationOutputs.clear()
    // doWhile skips the condition check after its very first iteration
    const isFirstIteration = scope.iteration === 0
    const shouldSkipFirstCheck = scope.skipFirstConditionCheck && isFirstIteration
    if (!shouldSkipFirstCheck) {
      // Evaluate the condition as it would hold for the NEXT iteration
      if (!this.evaluateCondition(ctx, scope, scope.iteration + 1)) {
        logger.debug('Loop condition false for next iteration - exiting', {
          loopId,
          currentIteration: scope.iteration,
          nextIteration: scope.iteration + 1,
        })
        return this.createExitResult(ctx, loopId, scope)
      }
    }
    scope.iteration++
    if (scope.items && scope.iteration < scope.items.length) {
      scope.item = scope.items[scope.iteration]
    }
    logger.debug('Loop will continue', {
      loopId,
      nextIteration: scope.iteration,
    })
    return {
      shouldContinue: true,
      shouldExit: false,
      selectedRoute: EDGE.LOOP_CONTINUE,
      currentIteration: scope.iteration,
    }
  }

  /** Builds the exit result and publishes the loop's aggregated output. */
  private createExitResult(
    ctx: ExecutionContext,
    loopId: string,
    scope: LoopScope
  ): LoopContinuationResult {
    const results = scope.allIterationOutputs
    ctx.blockStates?.set(loopId, {
      output: { results },
      executed: true,
      executionTime: DEFAULTS.EXECUTION_TIME,
    })
    logger.debug('Loop exiting', { loopId, totalIterations: scope.iteration })
    return {
      shouldContinue: false,
      shouldExit: true,
      selectedRoute: EDGE.LOOP_EXIT,
      aggregatedResults: results,
      currentIteration: scope.iteration,
    }
  }

  /**
   * Evaluates the loop condition, optionally as-if at a different iteration.
   * Temporarily swaps `scope.iteration` so `<loop.index>`-style references
   * resolve against the probed iteration, then restores it.
   */
  private evaluateCondition(ctx: ExecutionContext, scope: LoopScope, iteration?: number): boolean {
    if (!scope.condition) {
      logger.warn('No condition defined for loop')
      return false
    }
    const currentIteration = scope.iteration
    if (iteration !== undefined) {
      scope.iteration = iteration
    }
    const result = this.evaluateWhileCondition(ctx, scope.condition, scope)
    if (iteration !== undefined) {
      scope.iteration = currentIteration
    }
    return result
  }

  /**
   * Removes the loop's sentinels and member nodes from the executed set so
   * the next iteration can run them again.
   */
  clearLoopExecutionState(loopId: string, executedBlocks: Set<string>): void {
    const loopConfig = this.dag.loopConfigs.get(loopId) as LoopConfigWithNodes | undefined
    if (!loopConfig) {
      logger.warn('Loop config not found for state clearing', { loopId })
      return
    }
    const sentinelStartId = buildSentinelStartId(loopId)
    const sentinelEndId = buildSentinelEndId(loopId)
    const loopNodes = loopConfig.nodes
    executedBlocks.delete(sentinelStartId)
    executedBlocks.delete(sentinelEndId)
    for (const loopNodeId of loopNodes) {
      executedBlocks.delete(loopNodeId)
    }
    logger.debug('Cleared loop execution state', {
      loopId,
      nodesCleared: loopNodes.length + 2,
    })
  }

  /**
   * Re-adds intra-loop incoming edges (consumed during the previous iteration)
   * so dependency tracking works for the next iteration. Backward
   * loop-continue edges are intentionally not restored.
   */
  restoreLoopEdges(loopId: string): void {
    const loopConfig = this.dag.loopConfigs.get(loopId) as LoopConfigWithNodes | undefined
    if (!loopConfig) {
      logger.warn('Loop config not found for edge restoration', { loopId })
      return
    }
    const sentinelStartId = buildSentinelStartId(loopId)
    const sentinelEndId = buildSentinelEndId(loopId)
    const loopNodes = loopConfig.nodes
    const allLoopNodeIds = new Set([sentinelStartId, sentinelEndId, ...loopNodes])
    let restoredCount = 0
    for (const nodeId of allLoopNodeIds) {
      const nodeToRestore = this.dag.nodes.get(nodeId)
      if (!nodeToRestore) continue
      for (const [potentialSourceId, potentialSourceNode] of this.dag.nodes) {
        if (!allLoopNodeIds.has(potentialSourceId)) continue
        for (const [_, edge] of potentialSourceNode.outgoingEdges) {
          if (edge.target === nodeId) {
            const isBackwardEdge =
              edge.sourceHandle === EDGE.LOOP_CONTINUE ||
              edge.sourceHandle === EDGE.LOOP_CONTINUE_ALT
            if (!isBackwardEdge) {
              nodeToRestore.incomingEdges.add(potentialSourceId)
              restoredCount++
            }
          }
        }
      }
    }
    logger.debug('Restored loop edges', { loopId, edgesRestored: restoredCount })
  }

  /** Returns the scope for a loop, if one has been initialized. */
  getLoopScope(loopId: string): LoopScope | undefined {
    return this.state.getLoopScope(loopId)
  }

  /**
   * Hook for gating execution of a node inside a loop. Currently always true;
   * parameters are kept for interface stability.
   */
  shouldExecuteLoopNode(nodeId: string, loopId: string, context: ExecutionContext): boolean {
    return true
  }

  /** Finds the loop (if any) whose member nodes include the given node. */
  private findLoopForNode(nodeId: string): string | undefined {
    for (const [loopId, config] of this.dag.loopConfigs) {
      const nodes = (config as LoopConfigWithNodes).nodes || []
      if (nodes.includes(nodeId)) {
        return loopId
      }
    }
    return undefined
  }

  /**
   * Evaluates a `<ref>`-templated boolean condition. Each reference is
   * resolved against the scope, substituted into the expression, and the
   * result is executed. Returns false on any evaluation failure.
   */
  private evaluateWhileCondition(
    ctx: ExecutionContext,
    condition: string,
    scope: LoopScope
  ): boolean {
    if (!condition) {
      return false
    }
    try {
      const referencePattern = /<([^>]+)>/g
      let evaluatedCondition = condition
      const replacements: Record<string, string> = {}
      evaluatedCondition = evaluatedCondition.replace(referencePattern, (match) => {
        const resolved = this.resolver.resolveSingleReference(ctx, '', match, scope)
        if (resolved !== undefined) {
          // JSON.stringify escapes quotes/backslashes/newlines so a string
          // value cannot break out of (or inject into) the generated
          // expression — a plain `"${resolved}"` interpolation would.
          const literal =
            typeof resolved === 'string' ? JSON.stringify(resolved) : String(resolved)
          replacements[match] = literal
          return literal
        }
        return match
      })
      // NOTE(review): new Function executes workflow-authored condition text;
      // assumed to be trusted within the executor sandbox — confirm upstream.
      const result = Boolean(new Function(`return (${evaluatedCondition})`)())
      logger.debug('Evaluated loop condition', {
        condition,
        replacements,
        evaluatedCondition,
        result,
        iteration: scope.iteration,
      })
      return result
    } catch (error) {
      logger.error('Failed to evaluate loop condition', { condition, error })
      return false
    }
  }

  /**
   * Normalizes the forEach items config into an array: arrays pass through,
   * objects become [key, value] entries, `<ref>` strings are resolved, other
   * strings are JSON-parsed (single quotes normalized), and anything else is
   * resolved via the variable resolver. Failures yield an empty array.
   */
  private resolveForEachItems(ctx: ExecutionContext, items: any): any[] {
    if (Array.isArray(items)) {
      return items
    }
    if (typeof items === 'object' && items !== null) {
      return Object.entries(items)
    }
    if (typeof items === 'string') {
      if (items.startsWith('<') && items.endsWith('>')) {
        const resolved = this.resolver.resolveSingleReference(ctx, '', items)
        return Array.isArray(resolved) ? resolved : []
      }
      try {
        // Best-effort normalization of single-quoted pseudo-JSON
        const normalized = items.replace(/'/g, '"')
        const parsed = JSON.parse(normalized)
        return Array.isArray(parsed) ? parsed : []
      } catch (error) {
        logger.error('Failed to parse forEach items', { items, error })
        return []
      }
    }
    try {
      const resolved = this.resolver.resolveInputs(ctx, 'loop_foreach_items', { items }).items
      if (Array.isArray(resolved)) {
        return resolved
      }
      logger.warn('ForEach items did not resolve to array', {
        items,
        resolved,
      })
      return []
    } catch (error: any) {
      logger.error('Error resolving forEach items, returning empty array:', {
        error: error.message,
      })
      return []
    }
  }
}

View File

@@ -0,0 +1,227 @@
import { createLogger } from '@/lib/logs/console/logger'
import { EDGE } from '@/executor/consts'
import type { ExecutionContext, NormalizedBlockOutput } from '@/executor/types'
import { extractBaseBlockId } from '@/executor/utils/subflow-utils'
import type { DAG, DAGNode } from '../dag/builder'
import type { BlockExecutor } from '../execution/block-executor'
import type { ExecutionState } from '../execution/state'
import type { LoopOrchestrator } from './loop'
import type { ParallelOrchestrator } from './parallel'
const logger = createLogger('NodeExecutionOrchestrator')
/**
 * Result of executing one DAG node.
 * `isFinalOutput` is true when the node has no outgoing edges.
 */
export interface NodeExecutionResult {
nodeId: string
output: NormalizedBlockOutput
isFinalOutput: boolean
}
/**
 * Drives execution of individual DAG nodes and routes their completion to the
 * right subsystem: loop sentinels to the loop orchestrator, parallel branches
 * to the parallel orchestrator, and everything else to plain state storage.
 */
export class NodeExecutionOrchestrator {
  constructor(
    private dag: DAG,
    private state: ExecutionState,
    private blockExecutor: BlockExecutor,
    private loopOrchestrator: LoopOrchestrator,
    private parallelOrchestrator: ParallelOrchestrator
  ) {}

  /**
   * Executes a single node, initializing its loop scope on first touch and
   * short-circuiting sentinels and already-executed nodes.
   *
   * @param nodeId - DAG node ID to execute
   * @param context - Execution context (typed loosely here; presumably an
   *   ExecutionContext — confirm at the call site)
   * @throws When the node ID is not present in the DAG
   */
  async executeNode(nodeId: string, context: any): Promise<NodeExecutionResult> {
    const node = this.dag.nodes.get(nodeId)
    if (!node) {
      throw new Error(`Node not found in DAG: ${nodeId}`)
    }
    if (this.state.hasExecuted(nodeId)) {
      logger.debug('Node already executed, skipping', { nodeId })
      const output = this.state.getBlockOutput(nodeId) || {}
      return {
        nodeId,
        output,
        isFinalOutput: false,
      }
    }
    // Lazily create the loop scope the first time any member node runs
    const loopId = node.metadata.loopId
    if (loopId && !this.loopOrchestrator.getLoopScope(loopId)) {
      logger.debug('Initializing loop scope before first execution', { loopId, nodeId })
      this.loopOrchestrator.initializeLoopScope(context, loopId)
    }
    if (loopId && !this.loopOrchestrator.shouldExecuteLoopNode(nodeId, loopId, context)) {
      logger.debug('Loop node should not execute', { nodeId, loopId })
      return {
        nodeId,
        output: {},
        isFinalOutput: false,
      }
    }
    // Sentinels are synthetic nodes handled without the block executor
    if (node.metadata.isSentinel) {
      logger.debug('Executing sentinel node', {
        nodeId,
        sentinelType: node.metadata.sentinelType,
        loopId,
      })
      const output = this.handleSentinel(node, context)
      const isFinalOutput = node.outgoingEdges.size === 0
      return {
        nodeId,
        output,
        isFinalOutput,
      }
    }
    logger.debug('Executing node', { nodeId, blockType: node.block.metadata?.id })
    const output = await this.blockExecutor.execute(context, node, node.block)
    const isFinalOutput = node.outgoingEdges.size === 0
    return {
      nodeId,
      output,
      isFinalOutput,
    }
  }

  /**
   * Produces the synthetic output for a sentinel node. A start sentinel marks
   * loop entry; an end sentinel asks the loop orchestrator whether the loop
   * continues (routing to loop-continue) or exits (routing to loop-exit with
   * aggregated results).
   */
  private handleSentinel(node: DAGNode, context: any): NormalizedBlockOutput {
    const sentinelType = node.metadata.sentinelType
    const loopId = node.metadata.loopId
    if (sentinelType === 'start') {
      logger.debug('Sentinel start - loop entry', { nodeId: node.id, loopId })
      return { sentinelStart: true }
    }
    if (sentinelType === 'end') {
      logger.debug('Sentinel end - evaluating loop continuation', { nodeId: node.id, loopId })
      if (!loopId) {
        logger.warn('Sentinel end called without loopId')
        return { shouldExit: true, selectedRoute: EDGE.LOOP_EXIT }
      }
      const continuationResult = this.loopOrchestrator.evaluateLoopContinuation(context, loopId)
      logger.debug('Loop continuation evaluated', {
        loopId,
        shouldContinue: continuationResult.shouldContinue,
        shouldExit: continuationResult.shouldExit,
        iteration: continuationResult.currentIteration,
      })
      if (continuationResult.shouldContinue) {
        return {
          shouldContinue: true,
          shouldExit: false,
          selectedRoute: continuationResult.selectedRoute,
          loopIteration: continuationResult.currentIteration,
        }
      }
      return {
        results: continuationResult.aggregatedResults || [],
        shouldContinue: false,
        shouldExit: true,
        selectedRoute: continuationResult.selectedRoute,
        totalIterations: continuationResult.aggregatedResults?.length || 0,
      }
    }
    logger.warn('Unknown sentinel type', { sentinelType })
    return {}
  }

  /**
   * Dispatches a completed node's output to the appropriate completion
   * handler based on the node's metadata (sentinel > loop member > parallel
   * branch > regular).
   */
  async handleNodeCompletion(
    nodeId: string,
    output: NormalizedBlockOutput,
    context: any
  ): Promise<void> {
    const node = this.dag.nodes.get(nodeId)
    if (!node) {
      logger.error('Node not found during completion handling', { nodeId })
      return
    }
    logger.debug('Handling node completion', {
      nodeId: node.id,
      hasLoopId: !!node.metadata.loopId,
      isParallelBranch: !!node.metadata.isParallelBranch,
      isSentinel: !!node.metadata.isSentinel,
    })
    const loopId = node.metadata.loopId
    const isParallelBranch = node.metadata.isParallelBranch
    const isSentinel = node.metadata.isSentinel
    if (isSentinel) {
      logger.debug('Handling sentinel node', { nodeId: node.id, loopId })
      this.handleRegularNodeCompletion(node, output, context)
    } else if (loopId) {
      logger.debug('Handling loop node', { nodeId: node.id, loopId })
      this.handleLoopNodeCompletion(node, output, loopId, context)
    } else if (isParallelBranch) {
      const parallelId = this.findParallelIdForNode(node.id)
      if (parallelId) {
        logger.debug('Handling parallel node', { nodeId: node.id, parallelId })
        this.handleParallelNodeCompletion(node, output, parallelId)
      } else {
        // Branch metadata without a resolvable parallel: treat as regular
        this.handleRegularNodeCompletion(node, output, context)
      }
    } else {
      logger.debug('Handling regular node', { nodeId: node.id })
      this.handleRegularNodeCompletion(node, output, context)
    }
  }

  /** Records a loop member's output both in its loop scope and block state. */
  private handleLoopNodeCompletion(
    node: DAGNode,
    output: NormalizedBlockOutput,
    loopId: string,
    context: ExecutionContext
  ): void {
    this.loopOrchestrator.storeLoopNodeOutput(context, loopId, node.id, output)
    this.state.setBlockOutput(node.id, output)
  }

  /**
   * Records a parallel branch output, lazily initializing the parallel scope,
   * and aggregates results once every expected branch node has completed.
   */
  private handleParallelNodeCompletion(
    node: DAGNode,
    output: NormalizedBlockOutput,
    parallelId: string
  ): void {
    const scope = this.parallelOrchestrator.getParallelScope(parallelId)
    if (!scope) {
      const totalBranches = node.metadata.branchTotal || 1
      const parallelConfig = this.dag.parallelConfigs.get(parallelId)
      const nodesInParallel = (parallelConfig as any)?.nodes?.length || 1
      this.parallelOrchestrator.initializeParallelScope(parallelId, totalBranches, nodesInParallel)
    }
    const allComplete = this.parallelOrchestrator.handleParallelBranchCompletion(
      parallelId,
      node.id,
      output
    )
    if (allComplete) {
      this.parallelOrchestrator.aggregateParallelResults(parallelId)
    }
    this.state.setBlockOutput(node.id, output)
  }

  /**
   * Stores a regular node's output; additionally, when an end sentinel chose
   * the loop-continue route, clears and re-arms the loop for its next
   * iteration.
   */
  private handleRegularNodeCompletion(
    node: DAGNode,
    output: NormalizedBlockOutput,
    context: any
  ): void {
    this.state.setBlockOutput(node.id, output)
    if (
      node.metadata.isSentinel &&
      node.metadata.sentinelType === 'end' &&
      // Compare against the shared constant — the value is produced as
      // EDGE.LOOP_CONTINUE, so a hard-coded 'loop_continue' literal could
      // silently diverge from it.
      output.selectedRoute === EDGE.LOOP_CONTINUE
    ) {
      const loopId = node.metadata.loopId
      if (loopId) {
        logger.debug('Preparing loop for next iteration', { loopId })
        this.loopOrchestrator.clearLoopExecutionState(loopId, this.state.executedBlocks)
        this.loopOrchestrator.restoreLoopEdges(loopId)
      }
    }
  }

  /** Resolves a node (via its base block ID) to its owning parallel, if any. */
  private findParallelIdForNode(nodeId: string): string | undefined {
    const baseId = extractBaseBlockId(nodeId)
    return this.parallelOrchestrator.findParallelIdForNode(baseId)
  }
}

View File

@@ -0,0 +1,181 @@
import { createLogger } from '@/lib/logs/console/logger'
import type { NormalizedBlockOutput } from '@/executor/types'
import type { ParallelConfigWithNodes } from '@/executor/types/parallel'
import {
calculateBranchCount,
extractBaseBlockId,
extractBranchIndex,
parseDistributionItems,
} from '@/executor/utils/subflow-utils'
import type { SerializedParallel } from '@/serializer/types'
import type { DAG } from '../dag/builder'
import type { ExecutionState, ParallelScope } from '../execution/state'
const logger = createLogger('ParallelOrchestrator')
/** Branch-level metadata resolved from a virtual (branch-suffixed) node ID. */
export interface ParallelBranchMetadata {
// Zero-based index of the branch this node instance runs in.
branchIndex: number
// Total number of branches spawned for the owning parallel.
branchTotal: number
// Item assigned to this branch when distributing a collection, if any.
distributionItem?: any
// ID of the parallel block that owns the branch.
parallelId: string
}
/** Outcome of aggregating a parallel's branch outputs. */
export interface ParallelAggregationResult {
// False only when the scope could not be found for aggregation.
allBranchesComplete: boolean
// Per-branch output lists, indexed by branch; present on success.
results?: NormalizedBlockOutput[][]
// Number of branches that completed (equals totalBranches on success).
completedBranches?: number
// Total branch count configured for the parallel.
totalBranches?: number
}
/**
 * Owns all parallel-subflow bookkeeping: scope lifecycle, per-branch output
 * collection, completion detection, and final result aggregation.
 */
export class ParallelOrchestrator {
  constructor(
    private dag: DAG,
    private state: ExecutionState
  ) {}

  /**
   * Registers a fresh scope for a parallel block. The scope expects
   * `totalBranches * terminalNodesCount` node completions before the
   * parallel counts as finished.
   */
  initializeParallelScope(
    parallelId: string,
    totalBranches: number,
    terminalNodesCount = 1
  ): ParallelScope {
    const scope: ParallelScope = {
      parallelId,
      totalBranches,
      branchOutputs: new Map(),
      completedCount: 0,
      totalExpectedNodes: totalBranches * terminalNodesCount,
    }
    this.state.setParallelScope(parallelId, scope)
    logger.debug('Initialized parallel scope', {
      parallelId,
      totalBranches,
      terminalNodesCount,
      totalExpectedNodes: scope.totalExpectedNodes,
    })
    return scope
  }

  /**
   * Records one node's output under its branch and reports whether every
   * expected completion has now arrived.
   */
  handleParallelBranchCompletion(
    parallelId: string,
    nodeId: string,
    output: NormalizedBlockOutput
  ): boolean {
    const scope = this.state.getParallelScope(parallelId)
    if (!scope) {
      logger.warn('Parallel scope not found for branch completion', { parallelId, nodeId })
      return false
    }

    const branchIndex = extractBranchIndex(nodeId)
    if (branchIndex === null) {
      logger.warn('Could not extract branch index from node ID', { nodeId })
      return false
    }

    const branchOutputs = scope.branchOutputs.get(branchIndex) ?? []
    branchOutputs.push(output)
    scope.branchOutputs.set(branchIndex, branchOutputs)
    scope.completedCount += 1

    logger.debug('Recorded parallel branch output', {
      parallelId,
      branchIndex,
      nodeId,
      completedCount: scope.completedCount,
      totalExpected: scope.totalExpectedNodes,
    })

    const done = scope.completedCount >= scope.totalExpectedNodes
    if (done) {
      logger.debug('All parallel branches completed', {
        parallelId,
        totalBranches: scope.totalBranches,
        completedNodes: scope.completedCount,
      })
    }
    return done
  }

  /**
   * Collects branch outputs in branch order and publishes them as the
   * parallel block's `results` output.
   */
  aggregateParallelResults(parallelId: string): ParallelAggregationResult {
    const scope = this.state.getParallelScope(parallelId)
    if (!scope) {
      logger.error('Parallel scope not found for aggregation', { parallelId })
      return { allBranchesComplete: false }
    }

    // Branches that never reported contribute an empty output list.
    const results: NormalizedBlockOutput[][] = Array.from(
      { length: scope.totalBranches },
      (_, branch) => scope.branchOutputs.get(branch) || []
    )

    this.state.setBlockOutput(parallelId, {
      results,
    })

    logger.debug('Aggregated parallel results', {
      parallelId,
      totalBranches: scope.totalBranches,
      nodesPerBranch: results[0]?.length || 0,
      totalOutputs: scope.completedCount,
    })

    return {
      allBranchesComplete: true,
      results,
      completedBranches: scope.totalBranches,
      totalBranches: scope.totalBranches,
    }
  }

  /**
   * Resolves branch index, owning parallel, and distribution item for a
   * virtual (branch-suffixed) node ID. Returns null when the ID carries no
   * branch suffix or no owning parallel/config can be found.
   */
  extractBranchMetadata(nodeId: string): ParallelBranchMetadata | null {
    const branchIndex = extractBranchIndex(nodeId)
    if (branchIndex === null) {
      return null
    }
    const parallelId = this.findParallelIdForNode(extractBaseBlockId(nodeId))
    if (!parallelId) {
      return null
    }
    const config = this.dag.parallelConfigs.get(parallelId)
    if (!config) {
      return null
    }
    const { totalBranches, distributionItem } = this.getParallelConfigInfo(config, branchIndex)
    return {
      branchIndex,
      branchTotal: totalBranches,
      distributionItem,
      parallelId,
    }
  }

  /** Fetches the live scope for a parallel, if one has been initialized. */
  getParallelScope(parallelId: string): ParallelScope | undefined {
    return this.state.getParallelScope(parallelId)
  }

  /** Finds the parallel whose node list contains the given base node ID. */
  findParallelIdForNode(baseNodeId: string): string | undefined {
    for (const [parallelId, config] of this.dag.parallelConfigs) {
      if ((config as ParallelConfigWithNodes).nodes?.includes(baseNodeId)) {
        return parallelId
      }
    }
    return undefined
  }

  /** Derives total branch count and this branch's distribution item. */
  private getParallelConfigInfo(
    parallelConfig: SerializedParallel,
    branchIndex: number
  ): { totalBranches: number; distributionItem?: any } {
    const items = parseDistributionItems(parallelConfig)
    const totalBranches = calculateBranchCount(parallelConfig, items)
    const distributionItem =
      Array.isArray(items) && branchIndex < items.length ? items[branchIndex] : undefined
    return { totalBranches, distributionItem }
  }
}

View File

@@ -1,389 +0,0 @@
import { describe, expect, test, vi } from 'vitest'
import { createParallelExecutionState } from '@/executor/__test-utils__/executor-mocks'
import { BlockType } from '@/executor/consts'
import { ParallelManager } from '@/executor/parallels/parallels'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
// Replace the shared logger with silent spies so tests emit no console noise.
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: () => ({
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
}),
}))
// Behavioral tests for the legacy (pre-DAG) ParallelManager: state
// initialization, item distribution, virtual-block bookkeeping, and
// iteration processing.
describe('ParallelManager', () => {
// Fresh, empty ExecutionContext; individual tests populate only what they need.
const createMockContext = (): ExecutionContext => ({
workflowId: 'test-workflow',
blockStates: new Map(),
blockLogs: [],
metadata: { startTime: new Date().toISOString(), duration: 0 },
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopIterations: new Map(),
loopItems: new Map(),
completedLoops: new Set(),
executedBlocks: new Set(),
activeExecutionPath: new Set(),
workflow: { blocks: [], connections: [], loops: {}, parallels: {}, version: '2.0' },
parallelExecutions: new Map(),
})
describe('initializeParallel', () => {
test('should initialize parallel state for array distribution', () => {
const manager = new ParallelManager()
const items = ['apple', 'banana', 'cherry']
const state = manager.initializeParallel('parallel-1', items)
expect(state.parallelCount).toBe(3)
expect(state.distributionItems).toEqual(items)
expect(state.completedExecutions).toBe(0)
expect(state.executionResults).toBeInstanceOf(Map)
expect(state.activeIterations).toBeInstanceOf(Set)
// currentIteration starts at 1, not 0.
expect(state.currentIteration).toBe(1)
})
test('should initialize parallel state for object distribution', () => {
const manager = new ParallelManager()
// Object distribution: count is the number of keys.
const items = { first: 'alpha', second: 'beta', third: 'gamma' }
const state = manager.initializeParallel('parallel-1', items)
expect(state.parallelCount).toBe(3)
expect(state.distributionItems).toEqual(items)
})
})
describe('getIterationItem', () => {
test('should get item from array distribution', () => {
const manager = new ParallelManager()
const state = createParallelExecutionState({
parallelCount: 3,
distributionItems: ['apple', 'banana', 'cherry'],
})
expect(manager.getIterationItem(state, 0)).toBe('apple')
expect(manager.getIterationItem(state, 1)).toBe('banana')
expect(manager.getIterationItem(state, 2)).toBe('cherry')
})
test('should get entry from object distribution', () => {
const manager = new ParallelManager()
const state = createParallelExecutionState({
parallelCount: 3,
distributionItems: { first: 'alpha', second: 'beta', third: 'gamma' },
})
// Object distributions yield [key, value] entry pairs.
expect(manager.getIterationItem(state, 0)).toEqual(['first', 'alpha'])
expect(manager.getIterationItem(state, 1)).toEqual(['second', 'beta'])
expect(manager.getIterationItem(state, 2)).toEqual(['third', 'gamma'])
})
test('should return null for null distribution items', () => {
const manager = new ParallelManager()
const state = createParallelExecutionState({
parallelCount: 0,
distributionItems: null,
})
expect(manager.getIterationItem(state, 0)).toBeNull()
})
})
describe('areAllVirtualBlocksExecuted', () => {
test('should return true when all virtual blocks are executed', () => {
const manager = new ParallelManager()
// Virtual IDs follow `<blockId>_parallel_<parallelId>_iteration_<n>`.
const executedBlocks = new Set([
'func-1_parallel_parallel-1_iteration_0',
'func-1_parallel_parallel-1_iteration_1',
'func-1_parallel_parallel-1_iteration_2',
])
const parallel = {
id: 'parallel-1',
nodes: ['func-1'],
distribution: ['a', 'b', 'c'],
}
const state = createParallelExecutionState({
parallelCount: 3,
distributionItems: ['a', 'b', 'c'],
})
const context = {
workflow: {
blocks: [],
connections: [],
},
decisions: {
condition: new Map(),
router: new Map(),
},
executedBlocks: new Set(),
} as any
const result = manager.areAllVirtualBlocksExecuted(
'parallel-1',
parallel,
executedBlocks,
state,
context
)
expect(result).toBe(true)
})
test('should return false when some virtual blocks are not executed', () => {
const manager = new ParallelManager()
const executedBlocks = new Set([
'func-1_parallel_parallel-1_iteration_0',
'func-1_parallel_parallel-1_iteration_1',
// Missing iteration_2
])
const parallel = {
id: 'parallel-1',
nodes: ['func-1'],
distribution: ['a', 'b', 'c'],
}
const state = createParallelExecutionState({
parallelCount: 3,
distributionItems: ['a', 'b', 'c'],
})
// Create context with external connection to make func-1 a legitimate entry point
const context = {
workflow: {
blocks: [{ id: 'func-1', metadata: { id: 'function' } }],
connections: [
{
source: 'external-block',
target: 'func-1',
sourceHandle: 'output',
targetHandle: 'input',
},
],
},
decisions: {
condition: new Map(),
router: new Map(),
},
executedBlocks: new Set(),
} as any
const result = manager.areAllVirtualBlocksExecuted(
'parallel-1',
parallel,
executedBlocks,
state,
context
)
expect(result).toBe(false)
})
})
describe('createVirtualBlockInstances', () => {
test('should create virtual block instances for unexecuted blocks', () => {
const manager = new ParallelManager()
const block = {
id: 'func-1',
position: { x: 0, y: 0 },
config: { tool: BlockType.FUNCTION, params: {} },
inputs: {},
outputs: {},
enabled: true,
}
// Iteration 0 already executed; only 1 and 2 should be created.
const executedBlocks = new Set(['func-1_parallel_parallel-1_iteration_0'])
const activeExecutionPath = new Set(['func-1'])
const state = createParallelExecutionState({
parallelCount: 3,
distributionItems: ['a', 'b', 'c'],
})
const virtualIds = manager.createVirtualBlockInstances(
block,
'parallel-1',
state,
executedBlocks,
activeExecutionPath
)
expect(virtualIds).toEqual([
'func-1_parallel_parallel-1_iteration_1',
'func-1_parallel_parallel-1_iteration_2',
])
})
test('should skip blocks not in active execution path', () => {
const manager = new ParallelManager()
const block = {
id: 'func-1',
position: { x: 0, y: 0 },
config: { tool: BlockType.FUNCTION, params: {} },
inputs: {},
outputs: {},
enabled: true,
}
const executedBlocks = new Set<string>()
const activeExecutionPath = new Set<string>() // Block not in active path
const state = createParallelExecutionState({
parallelCount: 3,
distributionItems: ['a', 'b', 'c'],
})
const virtualIds = manager.createVirtualBlockInstances(
block,
'parallel-1',
state,
executedBlocks,
activeExecutionPath
)
expect(virtualIds).toEqual([])
})
})
describe('setupIterationContext', () => {
test('should set up context for array distribution', () => {
const manager = new ParallelManager()
const context = createMockContext()
const state = {
parallelCount: 3,
distributionItems: ['apple', 'banana', 'cherry'],
completedExecutions: 0,
executionResults: new Map(),
activeIterations: new Set<number>(),
currentIteration: 1,
}
context.parallelExecutions?.set('parallel-1', state)
manager.setupIterationContext(context, {
parallelId: 'parallel-1',
iterationIndex: 1,
})
// Item is stored under both the iteration-specific key and the bare
// parallel ID (the latter for backward compatibility).
expect(context.loopItems.get('parallel-1_iteration_1')).toBe('banana')
expect(context.loopItems.get('parallel-1')).toBe('banana')
expect(context.loopIterations.get('parallel-1')).toBe(1)
})
test('should set up context for object distribution', () => {
const manager = new ParallelManager()
const context = createMockContext()
const state = createParallelExecutionState({
parallelCount: 2,
distributionItems: { key1: 'value1', key2: 'value2' },
})
context.parallelExecutions?.set('parallel-1', state)
manager.setupIterationContext(context, {
parallelId: 'parallel-1',
iterationIndex: 0,
})
expect(context.loopItems.get('parallel-1_iteration_0')).toEqual(['key1', 'value1'])
expect(context.loopItems.get('parallel-1')).toEqual(['key1', 'value1'])
expect(context.loopIterations.get('parallel-1')).toBe(0)
})
})
describe('storeIterationResult', () => {
test('should store iteration result in parallel state', () => {
const manager = new ParallelManager()
const context = createMockContext()
const state = {
parallelCount: 3,
distributionItems: ['a', 'b', 'c'],
completedExecutions: 0,
executionResults: new Map(),
activeIterations: new Set<number>(),
currentIteration: 1,
}
context.parallelExecutions?.set('parallel-1', state)
const output = { result: 'test result' }
manager.storeIterationResult(context, 'parallel-1', 1, output)
// Results are keyed by `iteration_<index>`.
expect(state.executionResults.get('iteration_1')).toEqual(output)
})
})
describe('processParallelIterations', () => {
test('should re-execute parallel block when all virtual blocks are complete', async () => {
const parallels: SerializedWorkflow['parallels'] = {
'parallel-1': {
id: 'parallel-1',
nodes: ['func-1'],
distribution: ['a', 'b', 'c'],
},
}
const manager = new ParallelManager(parallels)
const context = createMockContext()
// Set up context as if parallel has been executed and all virtual blocks completed
context.executedBlocks.add('parallel-1')
context.executedBlocks.add('func-1_parallel_parallel-1_iteration_0')
context.executedBlocks.add('func-1_parallel_parallel-1_iteration_1')
context.executedBlocks.add('func-1_parallel_parallel-1_iteration_2')
const state = {
parallelCount: 3,
distributionItems: ['a', 'b', 'c'],
completedExecutions: 0,
executionResults: new Map(),
activeIterations: new Set<number>(),
currentIteration: 1,
}
context.parallelExecutions?.set('parallel-1', state)
await manager.processParallelIterations(context)
// Should remove parallel from executed blocks and add to active path
expect(context.executedBlocks.has('parallel-1')).toBe(false)
expect(context.activeExecutionPath.has('parallel-1')).toBe(true)
// Should remove child nodes from active path
expect(context.activeExecutionPath.has('func-1')).toBe(false)
})
test('should skip completed parallels', async () => {
const parallels: SerializedWorkflow['parallels'] = {
'parallel-1': {
id: 'parallel-1',
nodes: ['func-1'],
distribution: ['a', 'b', 'c'],
},
}
const manager = new ParallelManager(parallels)
const context = createMockContext()
// Mark parallel as completed
context.completedLoops.add('parallel-1')
await manager.processParallelIterations(context)
// Should not modify execution state
expect(context.executedBlocks.size).toBe(0)
expect(context.activeExecutionPath.size).toBe(0)
})
test('should handle empty parallels object', async () => {
const manager = new ParallelManager({})
const context = createMockContext()
// Should complete without error
await expect(manager.processParallelIterations(context)).resolves.toBeUndefined()
})
})
})

View File

@@ -1,240 +0,0 @@
import { createLogger } from '@/lib/logs/console/logger'
import { ParallelRoutingUtils } from '@/executor/parallels/utils'
import type { ExecutionContext, NormalizedBlockOutput } from '@/executor/types'
import type { SerializedBlock, SerializedParallel, SerializedWorkflow } from '@/serializer/types'
const logger = createLogger('ParallelManager')
/** Mutable tracking state for one parallel block's distributed execution. */
export interface ParallelState {
// Number of iterations: array length or object key count of the distribution.
parallelCount: number
// Collection being distributed across iterations; null when unresolved.
distributionItems: any[] | Record<string, any> | null
// Count of finished iterations (initialized to 0; incremented by callers).
completedExecutions: number
// Per-iteration results keyed by `iteration_<n>`; a value may be a single
// output or an array when multiple outputs land on the same iteration.
executionResults: Map<string, any>
// Iteration indices currently in flight.
activeIterations: Set<number>
// Iteration wave marker; initialized to 1 — a value of 0 makes
// processParallelIterations skip this parallel.
currentIteration: number
}
/**
 * Manages parallel block execution and state.
 * Handles distribution of items across parallel executions and tracking completion.
 *
 * NOTE: this is the legacy (pre-DAG) implementation; it mutates the shared
 * ExecutionContext's path/decision sets directly.
 */
export class ParallelManager {
// `parallels` maps parallel IDs to their serialized configs for this workflow.
constructor(private parallels: SerializedWorkflow['parallels'] = {}) {}
/**
 * Initializes a parallel execution state.
 * Count is the array length or the object's key count; iteration numbering
 * starts at 1.
 */
initializeParallel(
parallelId: string,
distributionItems: any[] | Record<string, any>
): ParallelState {
const parallelCount = Array.isArray(distributionItems)
? distributionItems.length
: Object.keys(distributionItems).length
return {
parallelCount,
distributionItems,
completedExecutions: 0,
executionResults: new Map(),
activeIterations: new Set(),
currentIteration: 1,
}
}
/**
 * Gets the current item for a specific parallel iteration.
 * Arrays yield the element; objects yield the [key, value] entry pair.
 * Returns null when there is no distribution.
 */
getIterationItem(parallelState: ParallelState, iterationIndex: number): any {
if (!parallelState.distributionItems) {
return null
}
if (Array.isArray(parallelState.distributionItems)) {
return parallelState.distributionItems[iterationIndex]
}
return Object.entries(parallelState.distributionItems)[iterationIndex]
}
/**
 * Checks if all virtual blocks that SHOULD execute for a parallel have been executed.
 * This now respects conditional routing - only checks blocks that should execute.
 */
areAllVirtualBlocksExecuted(
parallelId: string,
parallel: SerializedParallel,
executedBlocks: Set<string>,
parallelState: ParallelState,
context: ExecutionContext
): boolean {
const result = ParallelRoutingUtils.areAllRequiredVirtualBlocksExecuted(
parallel,
parallelState.parallelCount,
executedBlocks,
context
)
if (result) {
logger.info(`All required virtual blocks completed for parallel ${parallelId}`)
} else {
logger.info(`Parallel ${parallelId} not complete - some blocks still need to execute`)
}
return result
}
/**
 * Processes parallel iterations to check for completion and trigger re-execution.
 * For each active parallel: when every required virtual block has executed,
 * either mark it complete (if results are already aggregated) or queue the
 * parallel block itself for re-execution to aggregate.
 */
async processParallelIterations(context: ExecutionContext): Promise<void> {
if (!this.parallels || Object.keys(this.parallels).length === 0) {
return
}
for (const [parallelId, parallel] of Object.entries(this.parallels)) {
// Skip if this parallel has already been marked as completed
if (context.completedLoops.has(parallelId)) {
continue
}
// Check if the parallel block itself has been executed
const parallelBlockExecuted = context.executedBlocks.has(parallelId)
if (!parallelBlockExecuted) {
continue
}
// Get the parallel state (currentIteration === 0 means not started)
const parallelState = context.parallelExecutions?.get(parallelId)
if (!parallelState || parallelState.currentIteration === 0) {
continue
}
// Check if all virtual blocks have been executed
const allVirtualBlocksExecuted = this.areAllVirtualBlocksExecuted(
parallelId,
parallel,
context.executedBlocks,
parallelState,
context
)
if (allVirtualBlocksExecuted && !context.completedLoops.has(parallelId)) {
// Check if the parallel block already has aggregated results stored
const blockState = context.blockStates.get(parallelId)
if (blockState?.output?.completed && blockState?.output?.results) {
logger.info(
`Parallel ${parallelId} already has aggregated results, marking as completed without re-execution`
)
// Just mark it as completed without re-execution
context.completedLoops.add(parallelId)
// Activate the parallel-end-source connections if not already done
const parallelEndConnections =
context.workflow?.connections.filter(
(conn) => conn.source === parallelId && conn.sourceHandle === 'parallel-end-source'
) || []
for (const conn of parallelEndConnections) {
if (!context.activeExecutionPath.has(conn.target)) {
context.activeExecutionPath.add(conn.target)
logger.info(`Activated post-parallel path to ${conn.target}`)
}
}
continue
}
logger.info(
`All virtual blocks completed for parallel ${parallelId}, re-executing to aggregate results`
)
// Re-execute the parallel block to check completion and trigger end connections
context.executedBlocks.delete(parallelId)
context.activeExecutionPath.add(parallelId)
// IMPORTANT: Remove child nodes from active execution path to prevent re-execution
for (const nodeId of parallel.nodes) {
context.activeExecutionPath.delete(nodeId)
}
}
}
}
/**
 * Creates virtual block instances for parallel execution.
 * Returns IDs of the form `<blockId>_parallel_<parallelId>_iteration_<n>`
 * for iterations not yet executed and still on the active path.
 */
createVirtualBlockInstances(
block: SerializedBlock,
parallelId: string,
parallelState: ParallelState,
executedBlocks: Set<string>,
activeExecutionPath: Set<string>
): string[] {
const virtualBlockIds: string[] = []
for (let i = 0; i < parallelState.parallelCount; i++) {
const virtualBlockId = `${block.id}_parallel_${parallelId}_iteration_${i}`
// Skip if this virtual instance was already executed
if (executedBlocks.has(virtualBlockId)) {
continue
}
// Check if this virtual instance is in the active path
// (either the virtual ID itself or the original block ID qualifies)
if (!activeExecutionPath.has(virtualBlockId) && !activeExecutionPath.has(block.id)) {
continue
}
virtualBlockIds.push(virtualBlockId)
}
return virtualBlockIds
}
/**
 * Sets up iteration-specific context for a virtual block.
 * Writes the iteration's item under both the iteration key and the bare
 * parallel ID (backward compatibility), and records the iteration index.
 */
setupIterationContext(
context: ExecutionContext,
parallelInfo: { parallelId: string; iterationIndex: number }
): void {
const parallelState = context.parallelExecutions?.get(parallelInfo.parallelId)
if (parallelState?.distributionItems) {
const currentItem = this.getIterationItem(parallelState, parallelInfo.iterationIndex)
// Store the current item for this specific iteration
const iterationKey = `${parallelInfo.parallelId}_iteration_${parallelInfo.iterationIndex}`
context.loopItems.set(iterationKey, currentItem)
context.loopItems.set(parallelInfo.parallelId, currentItem) // Backward compatibility
context.loopIterations.set(parallelInfo.parallelId, parallelInfo.iterationIndex)
logger.info(`Set up iteration context for ${iterationKey} with item:`, currentItem)
}
}
/**
 * Stores the result of a parallel iteration.
 * Multiple outputs for the same iteration are folded into an array.
 */
storeIterationResult(
context: ExecutionContext,
parallelId: string,
iterationIndex: number,
output: NormalizedBlockOutput
): void {
const parallelState = context.parallelExecutions?.get(parallelId)
if (parallelState) {
const iterationKey = `iteration_${iterationIndex}`
const existingResult = parallelState.executionResults.get(iterationKey)
if (existingResult) {
if (Array.isArray(existingResult)) {
existingResult.push(output)
} else {
// Second output for this iteration: promote the entry to an array.
parallelState.executionResults.set(iterationKey, [existingResult, output])
}
} else {
parallelState.executionResults.set(iterationKey, output)
}
}
}
}

View File

@@ -1,105 +0,0 @@
import { BlockType } from '@/executor/consts'
import type { ExecutionContext } from '@/executor/types'
import { ConnectionUtils } from '@/executor/utils/connections'
import { VirtualBlockUtils } from '@/executor/utils/virtual-blocks'
import type { SerializedParallel } from '@/serializer/types'
/**
 * Utility functions for parallel block conditional routing logic.
 * Shared between Executor and ParallelManager to ensure consistent behavior.
 */
export class ParallelRoutingUtils {
/**
 * Determines if a block should execute in a specific parallel iteration
 * based on conditional routing and active execution paths.
 *
 * A block with no internal connections executes only if it has external
 * connections (i.e. it is a legitimate entry point, not orphaned). A block
 * with internal dependencies executes when at least one incoming connection
 * is active per the source's routing decision for this iteration.
 */
static shouldBlockExecuteInParallelIteration(
nodeId: string,
parallel: SerializedParallel,
iteration: number,
context: ExecutionContext
): boolean {
const internalConnections = ConnectionUtils.getInternalConnections(
nodeId,
parallel.nodes,
context.workflow?.connections || []
)
// If no internal connections, check if this is truly a starting block or an unconnected block
if (internalConnections.length === 0) {
// Use helper to check if this is an unconnected block
if (ConnectionUtils.isUnconnectedBlock(nodeId, context.workflow?.connections || [])) {
return false
}
// If there are external connections, this is a legitimate starting block - should execute
return true
}
// For blocks with dependencies within the parallel, check if any incoming connection is active
// based on routing decisions made by executed source blocks
return internalConnections.some((conn) => {
// Decisions are keyed by the source's virtual (per-iteration) ID.
const sourceVirtualId = VirtualBlockUtils.generateParallelId(
conn.source,
parallel.id,
iteration
)
// Source must be executed for the connection to be considered
if (!context.executedBlocks.has(sourceVirtualId)) {
return false
}
// Get the source block to check its type
const sourceBlock = context.workflow?.blocks.find((b) => b.id === conn.source)
const sourceBlockType = sourceBlock?.metadata?.id
// For condition blocks, check if the specific condition path was selected
if (sourceBlockType === BlockType.CONDITION) {
const selectedCondition = context.decisions.condition.get(sourceVirtualId)
const expectedHandle = `condition-${selectedCondition}`
return conn.sourceHandle === expectedHandle
}
// For router blocks, check if this specific target was selected
if (sourceBlockType === BlockType.ROUTER) {
const selectedTarget = context.decisions.router.get(sourceVirtualId)
return selectedTarget === conn.target
}
// For regular blocks, the connection is active if the source executed successfully
return true
})
}
/**
 * Checks if all virtual blocks that SHOULD execute for a parallel have been executed.
 * Respects conditional routing - only checks blocks that should execute.
 * Returns false as soon as one required virtual block is missing.
 */
static areAllRequiredVirtualBlocksExecuted(
parallel: SerializedParallel,
parallelCount: number,
executedBlocks: Set<string>,
context: ExecutionContext
): boolean {
for (const nodeId of parallel.nodes) {
for (let i = 0; i < parallelCount; i++) {
// Check if this specific block should execute in this iteration
const shouldExecute = ParallelRoutingUtils.shouldBlockExecuteInParallelIteration(
nodeId,
parallel,
i,
context
)
if (shouldExecute) {
const virtualBlockId = VirtualBlockUtils.generateParallelId(nodeId, parallel.id, i)
if (!executedBlocks.has(virtualBlockId)) {
return false
}
}
}
}
return true
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,389 +0,0 @@
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType } from '@/executor/consts'
import { Routing } from '@/executor/routing/routing'
import type { BlockState, ExecutionContext } from '@/executor/types'
import { ConnectionUtils } from '@/executor/utils/connections'
import { VirtualBlockUtils } from '@/executor/utils/virtual-blocks'
import type { SerializedBlock, SerializedConnection, SerializedWorkflow } from '@/serializer/types'
const logger = createLogger('PathTracker')
/**
* Manages the active execution paths in the workflow.
* Tracks which blocks should be executed based on routing decisions.
*/
export class PathTracker {
// The serialized workflow whose blocks/connections drive all path decisions.
constructor(private workflow: SerializedWorkflow) {}
/**
 * Reports whether a block is reachable on the active execution path:
 * either it has already been activated, or at least one of its incoming
 * connections originates from an executed block whose routing decision
 * (router/condition) selects it.
 *
 * @param blockId - ID of the block to check
 * @param context - Current execution context
 * @returns Whether the block is in the active execution path
 */
isInActivePath(blockId: string, context: ExecutionContext): boolean {
  if (context.activeExecutionPath.has(blockId)) {
    return true
  }
  const incoming = this.getIncomingConnections(blockId)
  return incoming.some((connection) => this.isConnectionActive(connection, context))
}
/**
 * Re-evaluates path activation for each freshly executed block. Virtual
 * parallel-iteration IDs are supported: the original block definition is
 * looked up, and `context.currentVirtualBlockId` is temporarily pointed at
 * the virtual ID so routing decisions are recorded under the correct key,
 * then restored afterwards.
 *
 * @param executedBlockIds - IDs of blocks that were just executed (may include virtual IDs)
 * @param context - Current execution context
 */
updateExecutionPaths(executedBlockIds: string[], context: ExecutionContext): void {
  for (const executedId of executedBlockIds) {
    const originalId = this.extractOriginalBlockId(executedId)
    const block = this.getBlock(originalId)
    if (!block) continue

    const savedVirtualId = context.currentVirtualBlockId
    if (executedId !== originalId) {
      context.currentVirtualBlockId = executedId
    }
    this.updatePathForBlock(block, context)
    context.currentVirtualBlockId = savedVirtualId
  }
}
/**
 * Resolves a block ID that may be virtual
 * (`originalId_parallel_parallelId_iteration_N`) to its original block ID.
 * Delegates to VirtualBlockUtils.extractOriginalId.
 */
private extractOriginalBlockId(blockId: string): string {
  return VirtualBlockUtils.extractOriginalId(blockId)
}
/** All workflow connections whose target is the given block. */
private getIncomingConnections(blockId: string): SerializedConnection[] {
  const { connections } = this.workflow
  return ConnectionUtils.getIncomingConnections(blockId, connections)
}
/** All workflow connections whose source is the given block. */
private getOutgoingConnections(blockId: string): SerializedConnection[] {
  const { connections } = this.workflow
  return ConnectionUtils.getOutgoingConnections(blockId, connections)
}
/** Looks up a block definition by ID in the serialized workflow. */
private getBlock(blockId: string): SerializedBlock | undefined {
  return this.workflow.blocks.find((block) => block.id === blockId)
}
/**
 * Decides whether a single connection can carry execution, based on the
 * source block's routing category and any recorded routing decisions.
 */
private isConnectionActive(connection: SerializedConnection, context: ExecutionContext): boolean {
  const sourceBlock = this.getBlock(connection.source)
  if (!sourceBlock) return false

  const sourceType = sourceBlock.metadata?.id || ''
  if (Routing.getCategory(sourceType) === 'routing') {
    // Routers select one target; conditions select one labelled branch.
    return sourceType === BlockType.ROUTER
      ? this.isRouterConnectionActive(connection, context)
      : this.isConditionConnectionActive(connection, context)
  }
  return this.isRegularConnectionActive(connection, context)
}
/** Active when the router has executed and chose this connection's target. */
private isRouterConnectionActive(
  connection: SerializedConnection,
  context: ExecutionContext
): boolean {
  if (!context.executedBlocks.has(connection.source)) {
    return false
  }
  return context.decisions.router.get(connection.source) === connection.target
}
/**
 * Active when the executed condition block selected the branch named by this
 * connection's `condition-<id>` source handle.
 */
private isConditionConnectionActive(
  connection: SerializedConnection,
  context: ExecutionContext
): boolean {
  const handle = connection.sourceHandle
  if (!handle?.startsWith('condition-')) {
    return false
  }
  const branchId = handle.slice('condition-'.length)
  if (!context.executedBlocks.has(connection.source)) {
    return false
  }
  return branchId === context.decisions.condition.get(connection.source)
}
/** Regular edges are live once the source is on the active path AND executed. */
private isRegularConnectionActive(
  connection: SerializedConnection,
  context: ExecutionContext
): boolean {
  const { source } = connection
  return context.activeExecutionPath.has(source) && context.executedBlocks.has(source)
}
/** Dispatches path activation to the handler matching the block's routing category. */
private updatePathForBlock(block: SerializedBlock, context: ExecutionContext): void {
  const blockType = block.metadata?.id || ''
  const category = Routing.getCategory(blockType)

  if (category === 'routing') {
    if (blockType === BlockType.ROUTER) {
      this.updateRouterPaths(block, context)
    } else {
      this.updateConditionPaths(block, context)
    }
    return
  }

  if (category === 'flow-control' && blockType === BlockType.LOOP) {
    this.updateLoopPaths(block, context)
    return
  }

  // Parallel blocks are driven by their own handler; everything else
  // (including non-loop flow control) follows regular path activation.
  this.updateRegularBlockPaths(block, context)
}
/**
* Update paths for router blocks
*/
private updateRouterPaths(block: SerializedBlock, context: ExecutionContext): void {
const blockStateKey = context.currentVirtualBlockId || block.id
const routerOutput = context.blockStates.get(blockStateKey)?.output
const selectedPath = routerOutput?.selectedPath?.blockId
if (selectedPath) {
const decisionKey = context.currentVirtualBlockId || block.id
if (!context.decisions.router.has(decisionKey)) {
context.decisions.router.set(decisionKey, selectedPath)
}
context.activeExecutionPath.add(selectedPath)
// Check if the selected target should activate downstream paths
const selectedBlock = this.getBlock(selectedPath)
const selectedBlockType = selectedBlock?.metadata?.id || ''
const selectedCategory = Routing.getCategory(selectedBlockType)
// Only activate downstream paths for regular blocks
// Routing blocks make their own routing decisions when they execute
// Flow control blocks manage their own path activation
if (selectedCategory === 'regular') {
this.activateDownstreamPathsSelectively(selectedPath, context)
}
logger.info(`Router ${block.id} selected path: ${selectedPath}`)
}
}
/**
* Selectively activate downstream paths, respecting block routing behavior
* This prevents flow control blocks from being activated when they should be controlled by routing
*/
private activateDownstreamPathsSelectively(blockId: string, context: ExecutionContext): void {
const outgoingConnections = this.getOutgoingConnections(blockId)
for (const conn of outgoingConnections) {
if (!context.activeExecutionPath.has(conn.target)) {
const targetBlock = this.getBlock(conn.target)
const targetBlockType = targetBlock?.metadata?.id
// Use routing strategy to determine if this connection should be activated
if (!Routing.shouldSkipConnection(conn.sourceHandle, targetBlockType || '')) {
context.activeExecutionPath.add(conn.target)
// Recursively activate downstream paths if the target block should activate downstream
if (Routing.shouldActivateDownstream(targetBlockType || '')) {
this.activateDownstreamPathsSelectively(conn.target, context)
}
}
}
}
}
/**
 * Records a condition block's decision and activates every target wired to
 * the chosen condition handle.
 */
private updateConditionPaths(block: SerializedBlock, context: ExecutionContext): void {
  // Block state is keyed by the virtual id inside parallel execution.
  const stateKey = context.currentVirtualBlockId || block.id
  const selectedConditionId = context.blockStates.get(stateKey)?.output?.selectedConditionId
  if (!selectedConditionId) return

  // Record the decision only once per (virtual) block.
  if (!context.decisions.condition.has(stateKey)) {
    context.decisions.condition.set(stateKey, selectedConditionId)
  }

  const selectedHandle = `condition-${selectedConditionId}`
  for (const conn of this.workflow.connections) {
    if (conn.source !== block.id || conn.sourceHandle !== selectedHandle) continue

    context.activeExecutionPath.add(conn.target)
    logger.debug(`Condition ${block.id} activated path to: ${conn.target}`)

    // Downstream activation only cascades through regular targets; routing
    // blocks decide their own paths and flow-control blocks self-manage.
    const targetType = this.getBlock(conn.target)?.metadata?.id || ''
    if (Routing.getCategory(targetType) === 'regular') {
      this.activateDownstreamPathsSelectively(conn.target, context)
    }
  }
}
/**
* Update paths for loop blocks
*/
private updateLoopPaths(block: SerializedBlock, context: ExecutionContext): void {
// Don't activate loop-start connections if the loop has completed
// (e.g., while loop condition is false)
if (context.completedLoops.has(block.id)) {
return
}
const outgoingConnections = this.getOutgoingConnections(block.id)
for (const conn of outgoingConnections) {
// Only activate loop-start connections
if (conn.sourceHandle === 'loop-start-source') {
context.activeExecutionPath.add(conn.target)
}
// loop-end-source connections will be activated by the loop manager
}
}
/**
* Update paths for regular blocks
*/
private updateRegularBlockPaths(block: SerializedBlock, context: ExecutionContext): void {
// Read block state using the correct ID (virtual ID if in parallel execution, otherwise original ID)
const blockStateKey = context.currentVirtualBlockId || block.id
const blockState = context.blockStates.get(blockStateKey)
const hasError = this.blockHasError(blockState)
const outgoingConnections = this.getOutgoingConnections(block.id)
// Check if block is part of loops
const blockLoops = this.getBlockLoops(block.id, context)
const isPartOfLoop = blockLoops.length > 0
for (const conn of outgoingConnections) {
if (this.shouldActivateConnection(conn, hasError, isPartOfLoop, blockLoops, context)) {
const targetBlock = this.getBlock(conn.target)
const targetBlockType = targetBlock?.metadata?.id
// Use routing strategy to determine if this connection should be activated
if (Routing.shouldSkipConnection(conn.sourceHandle, targetBlockType || '')) {
continue
}
context.activeExecutionPath.add(conn.target)
}
}
}
/**
 * A block is considered failed when its recorded output carries an error.
 */
private blockHasError(blockState: BlockState | undefined): boolean {
  const error = blockState?.output?.error
  return error !== undefined
}
/**
* Get loops that contain a block
*/
private getBlockLoops(
blockId: string,
context: ExecutionContext
): Array<{ id: string; loop: any }> {
return Object.entries(context.workflow?.loops || {})
.filter(([_, loop]) => loop.nodes.includes(blockId))
.map(([id, loop]) => ({ id, loop }))
}
/**
 * Decides whether an outgoing connection should be followed.
 *
 * Rules, in order:
 * 1. Connections leaving a loop stay blocked until every containing loop
 *    has completed.
 * 2. 'error' handles fire only when the block errored.
 * 3. Default/'source' handles fire only when the block succeeded.
 * 4. Any other handle type is always followed.
 */
private shouldActivateConnection(
  conn: SerializedConnection,
  hasError: boolean,
  isPartOfLoop: boolean,
  blockLoops: Array<{ id: string; loop: any }>,
  context: ExecutionContext
): boolean {
  if (isPartOfLoop) {
    const staysInsideLoop = blockLoops.some(({ loop }) => loop.nodes.includes(conn.target))
    if (!staysInsideLoop) {
      // External loop exits only open once all containing loops finish.
      const allLoopsCompleted = blockLoops.every(({ id }) => context.completedLoops?.has(id))
      if (!allLoopsCompleted) return false
    }
  }

  if (conn.sourceHandle === 'error') return hasError
  if (!conn.sourceHandle || conn.sourceHandle === 'source') return !hasError
  return true
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,151 +0,0 @@
import { describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { BlockCategory, Routing } from '@/executor/routing/routing'
describe('Routing', () => {
describe('getCategory', () => {
it.concurrent('should categorize flow control blocks correctly', () => {
expect(Routing.getCategory(BlockType.PARALLEL)).toBe(BlockCategory.FLOW_CONTROL)
expect(Routing.getCategory(BlockType.LOOP)).toBe(BlockCategory.FLOW_CONTROL)
expect(Routing.getCategory(BlockType.WORKFLOW)).toBe(BlockCategory.FLOW_CONTROL)
})
it.concurrent('should categorize routing blocks correctly', () => {
expect(Routing.getCategory(BlockType.ROUTER)).toBe(BlockCategory.ROUTING_BLOCK)
expect(Routing.getCategory(BlockType.CONDITION)).toBe(BlockCategory.ROUTING_BLOCK)
})
it.concurrent('should categorize regular blocks correctly', () => {
expect(Routing.getCategory(BlockType.FUNCTION)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.AGENT)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.API)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.STARTER)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.RESPONSE)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.EVALUATOR)).toBe(BlockCategory.REGULAR_BLOCK)
})
it.concurrent('should default to regular block for unknown types', () => {
expect(Routing.getCategory('unknown')).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory('')).toBe(BlockCategory.REGULAR_BLOCK)
})
})
describe('shouldActivateDownstream', () => {
it.concurrent('should return true for routing blocks', () => {
expect(Routing.shouldActivateDownstream(BlockType.ROUTER)).toBe(true)
expect(Routing.shouldActivateDownstream(BlockType.CONDITION)).toBe(true)
})
it.concurrent('should return false for flow control blocks', () => {
expect(Routing.shouldActivateDownstream(BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldActivateDownstream(BlockType.LOOP)).toBe(false)
expect(Routing.shouldActivateDownstream(BlockType.WORKFLOW)).toBe(false)
})
it.concurrent('should return true for regular blocks', () => {
expect(Routing.shouldActivateDownstream(BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldActivateDownstream(BlockType.AGENT)).toBe(true)
})
it.concurrent('should handle empty/undefined block types', () => {
expect(Routing.shouldActivateDownstream('')).toBe(true)
expect(Routing.shouldActivateDownstream(undefined as any)).toBe(true)
})
})
describe('requiresActivePathCheck', () => {
it.concurrent('should return true for flow control blocks', () => {
expect(Routing.requiresActivePathCheck(BlockType.PARALLEL)).toBe(true)
expect(Routing.requiresActivePathCheck(BlockType.LOOP)).toBe(true)
expect(Routing.requiresActivePathCheck(BlockType.WORKFLOW)).toBe(true)
})
it.concurrent('should return false for routing blocks', () => {
expect(Routing.requiresActivePathCheck(BlockType.ROUTER)).toBe(false)
expect(Routing.requiresActivePathCheck(BlockType.CONDITION)).toBe(false)
})
it.concurrent('should return false for regular blocks', () => {
expect(Routing.requiresActivePathCheck(BlockType.FUNCTION)).toBe(false)
expect(Routing.requiresActivePathCheck(BlockType.AGENT)).toBe(false)
})
it.concurrent('should handle empty/undefined block types', () => {
expect(Routing.requiresActivePathCheck('')).toBe(false)
expect(Routing.requiresActivePathCheck(undefined as any)).toBe(false)
})
})
describe('shouldSkipInSelectiveActivation', () => {
it.concurrent('should return true for flow control blocks', () => {
expect(Routing.shouldSkipInSelectiveActivation(BlockType.PARALLEL)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.LOOP)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.WORKFLOW)).toBe(true)
})
it.concurrent('should return false for routing blocks', () => {
expect(Routing.shouldSkipInSelectiveActivation(BlockType.ROUTER)).toBe(false)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.CONDITION)).toBe(false)
})
it.concurrent('should return false for regular blocks', () => {
expect(Routing.shouldSkipInSelectiveActivation(BlockType.FUNCTION)).toBe(false)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.AGENT)).toBe(false)
})
})
describe('shouldSkipConnection', () => {
it.concurrent('should allow regular connections to flow control blocks', () => {
expect(Routing.shouldSkipConnection(undefined, BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldSkipConnection('source', BlockType.LOOP)).toBe(false)
})
it.concurrent('should skip flow control specific connections', () => {
expect(Routing.shouldSkipConnection('parallel-start-source', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('parallel-end-source', BlockType.AGENT)).toBe(true)
expect(Routing.shouldSkipConnection('loop-start-source', BlockType.API)).toBe(true)
expect(Routing.shouldSkipConnection('loop-end-source', BlockType.EVALUATOR)).toBe(true)
})
it.concurrent('should not skip regular connections to regular blocks', () => {
expect(Routing.shouldSkipConnection('source', BlockType.FUNCTION)).toBe(false)
expect(Routing.shouldSkipConnection('source', BlockType.AGENT)).toBe(false)
expect(Routing.shouldSkipConnection(undefined, BlockType.API)).toBe(false)
})
it.concurrent('should skip condition-specific connections during selective activation', () => {
expect(Routing.shouldSkipConnection('condition-test-if', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('condition-test-else', BlockType.AGENT)).toBe(true)
})
it.concurrent('should handle empty/undefined types', () => {
expect(Routing.shouldSkipConnection('', '')).toBe(false)
expect(Routing.shouldSkipConnection(undefined, '')).toBe(false)
})
})
describe('getBehavior', () => {
it.concurrent('should return correct behavior for each category', () => {
const flowControlBehavior = Routing.getBehavior(BlockType.PARALLEL)
expect(flowControlBehavior).toEqual({
shouldActivateDownstream: false,
requiresActivePathCheck: true,
skipInSelectiveActivation: true,
})
const routingBehavior = Routing.getBehavior(BlockType.ROUTER)
expect(routingBehavior).toEqual({
shouldActivateDownstream: true,
requiresActivePathCheck: false,
skipInSelectiveActivation: false,
})
const regularBehavior = Routing.getBehavior(BlockType.FUNCTION)
expect(regularBehavior).toEqual({
shouldActivateDownstream: true,
requiresActivePathCheck: false,
skipInSelectiveActivation: false,
})
})
})
})

View File

@@ -1,159 +0,0 @@
import { BlockType } from '@/executor/consts'
/**
 * High-level routing categories used to select path-update behavior for a
 * block type. The mapping from concrete block types to categories lives in
 * `Routing.getCategory`, which defaults unknown types to REGULAR_BLOCK.
 */
export enum BlockCategory {
  ROUTING_BLOCK = 'routing', // router, condition - make routing decisions
  FLOW_CONTROL = 'flow-control', // parallel, loop - control execution flow
  REGULAR_BLOCK = 'regular', // function, agent, etc. - regular execution
}
/**
 * Per-category behavior flags consumed by the executor and path tracker.
 * One instance exists per BlockCategory (see Routing.BEHAVIOR_MAP).
 */
export interface RoutingBehavior {
  shouldActivateDownstream: boolean // Whether this block should activate downstream blocks when it completes
  requiresActivePathCheck: boolean // Whether this block's handler needs routing-aware logic (NOT universal path checking)
  skipInSelectiveActivation: boolean // Whether to skip this block type during connection filtering in selective activation
}
/**
* Centralized routing strategy that defines how different block types
* should behave in the execution path system.
*
* IMPORTANT: This system works in conjunction with the executor's universal
* active path checking (executor/index.ts lines 992-994). The flags here
* control specialized behavior, not basic path enforcement.
*
* ## Execution Flow Architecture:
*
* 1. **Universal Path Check** (Executor Level):
* - ALL blocks are subject to `context.activeExecutionPath.has(block.id)`
* - This prevents unselected blocks from executing (fixes router bypass bug)
*
* 2. **Specialized Routing Behavior** (Handler Level):
* - Some block handlers need additional routing logic
* - Controlled by `requiresActivePathCheck` flag
*
* ## Block Categories Explained:
*
* ### ROUTING_BLOCK (Router, Condition)
* - **Role**: Decision makers that CREATE active execution paths
* - **Path Check**: NO - they must execute to make routing decisions
* - **Downstream**: YES - they activate their selected targets
* - **Selective**: NO - they participate in making routing decisions
*
* ### FLOW_CONTROL (Parallel, Loop, Workflow)
* - **Role**: Complex blocks that CONSUME routing decisions
* - **Path Check**: YES - their handlers need routing awareness for internal logic
* - **Downstream**: NO - they manage their own internal activation patterns
* - **Selective**: YES - skip them during connection filtering to prevent premature activation
*
* ### REGULAR_BLOCK (Function, Agent, API, etc.)
* - **Role**: Standard execution blocks with simple activation patterns
* - **Path Check**: NO - they rely on dependency logic and universal path checking
* - **Downstream**: YES - they activate all downstream blocks normally
* - **Selective**: NO - they participate in normal activation patterns
*
* ## Multi-Input Support:
* The dependency checking logic (executor/index.ts lines 1149-1153) allows blocks
* with multiple inputs to execute when ANY valid input is available, supporting
* scenarios like agents that reference multiple router destinations.
*/
export class Routing {
  // Behavior flags per category; see the module-level comment for the full
  // rationale behind each flag.
  private static readonly BEHAVIOR_MAP: Record<BlockCategory, RoutingBehavior> = {
    [BlockCategory.ROUTING_BLOCK]: {
      shouldActivateDownstream: true, // Activates only its SELECTED targets (not all connected ones)
      requiresActivePathCheck: false, // Creates paths itself, so no handler-level check needed
      skipInSelectiveActivation: false, // Participates in routing decisions
    },
    [BlockCategory.FLOW_CONTROL]: {
      shouldActivateDownstream: false, // Manages its own complex internal activation
      requiresActivePathCheck: true, // Handler needs routing context for internal decisions
      skipInSelectiveActivation: true, // Skipped so routing decisions cannot be bypassed
    },
    [BlockCategory.REGULAR_BLOCK]: {
      shouldActivateDownstream: true, // Activates all connected downstream blocks
      requiresActivePathCheck: false, // Relies on universal path checking + dependency logic
      skipInSelectiveActivation: false, // Normal activation patterns
    },
  }

  // Category membership; any type not listed below is a regular block.
  private static readonly FLOW_CONTROL_TYPES: ReadonlySet<string> = new Set([
    BlockType.PARALLEL,
    BlockType.LOOP,
    BlockType.WORKFLOW,
  ])
  private static readonly ROUTING_TYPES: ReadonlySet<string> = new Set([
    BlockType.ROUTER,
    BlockType.CONDITION,
  ])

  // Source handles that belong to flow-control internals and must never be
  // followed during selective activation.
  private static readonly FLOW_CONTROL_HANDLES: ReadonlySet<string> = new Set([
    'parallel-start-source',
    'parallel-end-source',
    'loop-start-source',
    'loop-end-source',
  ])

  /** Maps a block type to its routing category; unknown types are regular. */
  static getCategory(blockType: string): BlockCategory {
    if (Routing.FLOW_CONTROL_TYPES.has(blockType)) return BlockCategory.FLOW_CONTROL
    if (Routing.ROUTING_TYPES.has(blockType)) return BlockCategory.ROUTING_BLOCK
    return BlockCategory.REGULAR_BLOCK
  }

  /** Returns the full behavior flags for a block type's category. */
  static getBehavior(blockType: string): RoutingBehavior {
    return Routing.BEHAVIOR_MAP[Routing.getCategory(blockType)]
  }

  /** Whether this block type activates downstream blocks when it completes. */
  static shouldActivateDownstream(blockType: string): boolean {
    return Routing.getBehavior(blockType).shouldActivateDownstream
  }

  /**
   * Determines if a block's HANDLER needs routing-aware logic.
   * Note: This is NOT the same as universal path checking done by the executor.
   *
   * @param blockType The block type to check
   * @returns true if the block handler should implement routing-aware behavior
   */
  static requiresActivePathCheck(blockType: string): boolean {
    return Routing.getBehavior(blockType).requiresActivePathCheck
  }

  /**
   * Determines if a block type should be skipped during selective activation.
   * Used to prevent certain block types from being prematurely activated
   * when they should wait for explicit routing decisions.
   */
  static shouldSkipInSelectiveActivation(blockType: string): boolean {
    return Routing.getBehavior(blockType).skipInSelectiveActivation
  }

  /**
   * Checks if a connection should be skipped during selective activation.
   *
   * This prevents certain types of connections from triggering premature
   * activation of blocks that should wait for explicit routing decisions.
   */
  static shouldSkipConnection(sourceHandle: string | undefined, targetBlockType: string): boolean {
    const handle = sourceHandle || ''
    // Internal flow-control handles are managed by their own handlers.
    if (Routing.FLOW_CONTROL_HANDLES.has(handle)) {
      return true
    }
    // Condition-specific handles only activate when the condition makes a
    // specific decision.
    if (handle.startsWith('condition-')) {
      return true
    }
    // Plain connections may activate anything, including flow-control blocks,
    // which enforce their own active-path checks when they execute.
    return false
  }
}

View File

@@ -1,224 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { Executor } from '@/executor'
import { BlockType } from '@/executor/consts'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Full Executor Test', () => {
  let workflow: SerializedWorkflow
  let executor: Executor

  beforeEach(() => {
    // Topology under test:
    //   Start → Router 1 → (Function 1 | Parallel 1)
    //   Function 1 → Condition 1 → (Function 2 | Parallel 2)
    //   Parallel 1 → Agent 1 and Parallel 2 → Agent 2 (parallel-start-source)
    workflow = {
      version: '2.0',
      blocks: [
        {
          id: 'bd9f4f7d-8aed-4860-a3be-8bebd1931b19',
          position: { x: 0, y: 0 },
          metadata: { id: BlockType.STARTER, name: 'Start' },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'f29a40b7-125a-45a7-a670-af14a1498f94',
          position: { x: 100, y: 0 },
          metadata: { id: BlockType.ROUTER, name: 'Router 1' },
          config: {
            tool: BlockType.ROUTER,
            params: {
              prompt: 'if x then function 1\nif y then parallel\n\ninput: x',
              model: 'gpt-4o',
            },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
          position: { x: 200, y: -50 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          config: { tool: BlockType.FUNCTION, params: { code: "return 'one'" } },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'a62902db-fd8d-4851-aa88-acd5e7667497',
          position: { x: 200, y: 50 },
          metadata: { id: BlockType.PARALLEL, name: 'Parallel 1' },
          config: { tool: BlockType.PARALLEL, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: '0494cf56-2520-4e29-98ad-313ea55cf142',
          position: { x: 300, y: -50 },
          metadata: { id: 'condition', name: 'Condition 1' },
          config: { tool: 'condition', params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: '033ea142-3002-4a68-9e12-092b10b8c9c8',
          position: { x: 400, y: -100 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
          config: { tool: BlockType.FUNCTION, params: { code: "return 'two'" } },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: '037140a8-fda3-44e2-896c-6adea53ea30f',
          position: { x: 400, y: 0 },
          metadata: { id: BlockType.PARALLEL, name: 'Parallel 2' },
          config: { tool: BlockType.PARALLEL, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'a91e3a02-b884-4823-8197-30ae498ac94c',
          position: { x: 300, y: 100 },
          metadata: { id: 'agent', name: 'Agent 1' },
          config: { tool: 'agent', params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: '97974a42-cdf4-4810-9caa-b5e339f42ab0',
          position: { x: 500, y: 0 },
          metadata: { id: 'agent', name: 'Agent 2' },
          config: { tool: 'agent', params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        {
          source: 'bd9f4f7d-8aed-4860-a3be-8bebd1931b19',
          target: 'f29a40b7-125a-45a7-a670-af14a1498f94',
        },
        {
          source: 'f29a40b7-125a-45a7-a670-af14a1498f94',
          target: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
        },
        {
          source: 'f29a40b7-125a-45a7-a670-af14a1498f94',
          target: 'a62902db-fd8d-4851-aa88-acd5e7667497',
        },
        {
          source: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
          target: '0494cf56-2520-4e29-98ad-313ea55cf142',
        },
        {
          source: '0494cf56-2520-4e29-98ad-313ea55cf142',
          target: '033ea142-3002-4a68-9e12-092b10b8c9c8',
          sourceHandle: 'condition-0494cf56-2520-4e29-98ad-313ea55cf142-if',
        },
        {
          source: '0494cf56-2520-4e29-98ad-313ea55cf142',
          target: '037140a8-fda3-44e2-896c-6adea53ea30f',
          sourceHandle: 'condition-0494cf56-2520-4e29-98ad-313ea55cf142-else',
        },
        {
          source: 'a62902db-fd8d-4851-aa88-acd5e7667497',
          target: 'a91e3a02-b884-4823-8197-30ae498ac94c',
          sourceHandle: 'parallel-start-source',
        },
        {
          source: '037140a8-fda3-44e2-896c-6adea53ea30f',
          target: '97974a42-cdf4-4810-9caa-b5e339f42ab0',
          sourceHandle: 'parallel-start-source',
        },
      ],
      loops: {},
      parallels: {
        'a62902db-fd8d-4851-aa88-acd5e7667497': {
          id: 'a62902db-fd8d-4851-aa88-acd5e7667497',
          nodes: ['a91e3a02-b884-4823-8197-30ae498ac94c'],
          distribution: ['item1', 'item2'],
        },
        '037140a8-fda3-44e2-896c-6adea53ea30f': {
          id: '037140a8-fda3-44e2-896c-6adea53ea30f',
          nodes: ['97974a42-cdf4-4810-9caa-b5e339f42ab0'],
          distribution: ['item1', 'item2'],
        },
      },
    }
    executor = new Executor(workflow)
  })

  it('should test the full executor flow and see what happens', async () => {
    // Exploratory end-to-end run without mocking model calls. Execution
    // failures are tolerated deliberately (no API keys in CI), but the test
    // no longer swallows them silently: either the result shape is asserted
    // or the surfaced error is.
    try {
      const result = await executor.execute('test-workflow-id')
      if ('success' in result) {
        // ExecutionResult (not a StreamingExecution): assert the basic shape.
        expect(result.success).toBeDefined()
      } else {
        expect(result).toBeDefined()
      }
    } catch (error) {
      // Best-effort run: an actual error surfacing is an acceptable outcome,
      // but it must at least be a real value, not a silent no-op.
      expect(error).toBeDefined()
    }
  })

  it('should test the executor getNextExecutionLayer method directly', async () => {
    // Reconstruct the exact context state reached after the condition block
    // executed and chose the "if" branch (Function 2), bypassing Parallel 2.
    const context = (executor as any).createExecutionContext('test-workflow', new Date())

    // Blocks executed so far: Start, Router 1, Function 1, Condition 1, Function 2.
    context.executedBlocks.add('bd9f4f7d-8aed-4860-a3be-8bebd1931b19') // Start
    context.executedBlocks.add('f29a40b7-125a-45a7-a670-af14a1498f94') // Router 1
    context.executedBlocks.add('d09b0a90-2c59-4a2c-af15-c30321e36d9b') // Function 1
    context.executedBlocks.add('0494cf56-2520-4e29-98ad-313ea55cf142') // Condition 1
    context.executedBlocks.add('033ea142-3002-4a68-9e12-092b10b8c9c8') // Function 2

    // Router 1 selected Function 1 (not Parallel 1).
    context.decisions.router.set(
      'f29a40b7-125a-45a7-a670-af14a1498f94',
      'd09b0a90-2c59-4a2c-af15-c30321e36d9b'
    )
    // Condition 1 selected the "if" branch.
    context.decisions.condition.set(
      '0494cf56-2520-4e29-98ad-313ea55cf142',
      '0494cf56-2520-4e29-98ad-313ea55cf142-if'
    )

    // Active path mirrors the executed chain.
    context.activeExecutionPath.add('bd9f4f7d-8aed-4860-a3be-8bebd1931b19')
    context.activeExecutionPath.add('f29a40b7-125a-45a7-a670-af14a1498f94')
    context.activeExecutionPath.add('d09b0a90-2c59-4a2c-af15-c30321e36d9b')
    context.activeExecutionPath.add('0494cf56-2520-4e29-98ad-313ea55cf142')
    context.activeExecutionPath.add('033ea142-3002-4a68-9e12-092b10b8c9c8')

    const nextLayer = (executor as any).getNextExecutionLayer(context)

    // The key assertion: neither Parallel 2 nor its Agent 2 may run, because
    // the condition routed execution away from the "else" branch.
    expect(nextLayer).not.toContain('037140a8-fda3-44e2-896c-6adea53ea30f')
    expect(nextLayer).not.toContain('97974a42-cdf4-4810-9caa-b5e339f42ab0')
  })
})

View File

@@ -1,253 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { Executor } from '@/executor/index'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Multi-Input Routing Scenarios', () => {
  let workflow: SerializedWorkflow
  let executor: Executor
  beforeEach(() => {
    // Topology: start → router-1 → (function-1 | function-2) → agent-1.
    // agent-1 has TWO incoming connections, so these tests exercise the
    // multi-input dependency logic when only one source actually executes.
    workflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          position: { x: 0, y: 0 },
          metadata: { id: BlockType.STARTER, name: 'Start' },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'router-1',
          position: { x: 150, y: 0 },
          metadata: { id: BlockType.ROUTER, name: 'Router 1' },
          config: {
            tool: BlockType.ROUTER,
            params: {
              prompt: 'if the input is x, go to function 1.\notherwise, go to function 2.\ny',
              model: 'gpt-4o',
            },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-1',
          position: { x: 300, y: -100 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          config: {
            tool: BlockType.FUNCTION,
            params: { code: "return 'hi'" },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-2',
          position: { x: 300, y: 100 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
          config: {
            tool: BlockType.FUNCTION,
            params: { code: "return 'bye'" },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'agent-1',
          position: { x: 500, y: 0 },
          metadata: { id: BlockType.AGENT, name: 'Agent 1' },
          config: {
            tool: BlockType.AGENT,
            params: {
              systemPrompt: 'return the following in urdu roman english',
              userPrompt: '<function1.result>\n<function2.result>',
              model: 'gpt-4o',
            },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'router-1' },
        { source: 'router-1', target: 'function-1' },
        { source: 'router-1', target: 'function-2' },
        { source: 'function-1', target: 'agent-1' }, // Agent depends on function-1
        { source: 'function-2', target: 'agent-1' }, // Agent depends on function-2
      ],
      loops: {},
      parallels: {},
    }
    executor = new Executor(workflow, {}, {})
  })
  it('should handle multi-input target when router selects function-1', async () => {
    // Test scenario: Router selects function-1, agent should still execute with function-1's output
    const context = (executor as any).createExecutionContext('test-workflow', new Date())
    // Step 1: Execute start block
    context.executedBlocks.add('start')
    context.activeExecutionPath.add('start')
    context.activeExecutionPath.add('router-1')
    // Step 2: Router selects function-1 (not function-2)
    context.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'function-1',
          blockType: BlockType.FUNCTION,
          blockTitle: 'Function 1',
        },
      },
      executed: true,
      executionTime: 876,
    })
    context.executedBlocks.add('router-1')
    context.decisions.router.set('router-1', 'function-1')
    // Update execution paths after router-1
    const pathTracker = (executor as any).pathTracker
    pathTracker.updateExecutionPaths(['router-1'], context)
    // Verify only function-1 is active
    expect(context.activeExecutionPath.has('function-1')).toBe(true)
    expect(context.activeExecutionPath.has('function-2')).toBe(false)
    // Step 3: Execute function-1
    context.blockStates.set('function-1', {
      output: { result: 'hi', stdout: '' },
      executed: true,
      executionTime: 66,
    })
    context.executedBlocks.add('function-1')
    // Update paths after function-1
    pathTracker.updateExecutionPaths(['function-1'], context)
    // Step 4: Check agent-1 dependencies
    const agent1Connections = workflow.connections.filter((conn) => conn.target === 'agent-1')
    // Check dependencies for agent-1
    const agent1DependenciesMet = (executor as any).checkDependencies(
      agent1Connections,
      context.executedBlocks,
      context
    )
    // Step 5: Get next execution layer
    const nextLayer = (executor as any).getNextExecutionLayer(context)
    // CRITICAL TEST: Agent should be able to execute even though it has multiple inputs
    // The key is that the dependency logic should handle this correctly:
    // - function-1 executed and is selected → dependency met
    // - function-2 not executed and not selected → dependency considered met (inactive source)
    expect(agent1DependenciesMet).toBe(true)
    expect(nextLayer).toContain('agent-1')
  })
  it('should handle multi-input target when router selects function-2', async () => {
    // Test scenario: Router selects function-2, agent should still execute with function-2's output
    const context = (executor as any).createExecutionContext('test-workflow', new Date())
    // Step 1: Execute start and router-1 selecting function-2
    context.executedBlocks.add('start')
    context.activeExecutionPath.add('start')
    context.activeExecutionPath.add('router-1')
    context.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'function-2',
          blockType: BlockType.FUNCTION,
          blockTitle: 'Function 2',
        },
      },
      executed: true,
      executionTime: 876,
    })
    context.executedBlocks.add('router-1')
    context.decisions.router.set('router-1', 'function-2')
    const pathTracker = (executor as any).pathTracker
    pathTracker.updateExecutionPaths(['router-1'], context)
    // Verify only function-2 is active (mirror image of the first test)
    expect(context.activeExecutionPath.has('function-1')).toBe(false)
    expect(context.activeExecutionPath.has('function-2')).toBe(true)
    // Step 2: Execute function-2
    context.blockStates.set('function-2', {
      output: { result: 'bye', stdout: '' },
      executed: true,
      executionTime: 66,
    })
    context.executedBlocks.add('function-2')
    pathTracker.updateExecutionPaths(['function-2'], context)
    // Step 3: Check agent-1 dependencies
    const agent1Connections = workflow.connections.filter((conn) => conn.target === 'agent-1')
    const agent1DependenciesMet = (executor as any).checkDependencies(
      agent1Connections,
      context.executedBlocks,
      context
    )
    // Step 4: Get next execution layer
    const nextLayer = (executor as any).getNextExecutionLayer(context)
    // CRITICAL TEST: Agent should execute with function-2's output
    expect(agent1DependenciesMet).toBe(true)
    expect(nextLayer).toContain('agent-1')
  })
  it('should verify the dependency logic for inactive sources', async () => {
    // This test specifically validates the multi-input dependency logic
    // by calling checkDependencies directly on individual connection sets.
    const context = (executor as any).createExecutionContext('test-workflow', new Date())
    // Setup: Router executed and selected function-1, function-1 executed
    context.executedBlocks.add('start')
    context.executedBlocks.add('router-1')
    context.executedBlocks.add('function-1')
    context.decisions.router.set('router-1', 'function-1')
    context.activeExecutionPath.add('start')
    context.activeExecutionPath.add('router-1')
    context.activeExecutionPath.add('function-1')
    context.activeExecutionPath.add('agent-1') // Agent should be active due to function-1
    // Test individual dependency checks
    const checkDependencies = (executor as any).checkDependencies.bind(executor)
    // Connection from function-1 (executed, selected) → should be met
    const function1Connection = [{ source: 'function-1', target: 'agent-1' }]
    const function1DepMet = checkDependencies(function1Connection, context.executedBlocks, context)
    // Connection from function-2 (not executed, not selected) → should be met because of inactive source logic
    const function2Connection = [{ source: 'function-2', target: 'agent-1' }]
    const function2DepMet = checkDependencies(function2Connection, context.executedBlocks, context)
    // Both connections together (the actual agent scenario)
    const bothConnections = [
      { source: 'function-1', target: 'agent-1' },
      { source: 'function-2', target: 'agent-1' },
    ]
    const bothDepMet = checkDependencies(bothConnections, context.executedBlocks, context)
    // CRITICAL ASSERTIONS:
    expect(function1DepMet).toBe(true) // Executed and active
    expect(function2DepMet).toBe(true) // Not in active path, so considered met (line 1151)
    expect(bothDepMet).toBe(true) // All dependencies should be met
  })
})

View File

@@ -1,307 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { PathTracker } from '@/executor/path/path'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Nested Routing Fix - Router → Condition → Target', () => {
let workflow: SerializedWorkflow
let pathTracker: PathTracker
let mockContext: ExecutionContext
beforeEach(() => {
  // Create a workflow similar to the screenshot: Router → Condition → Function/Parallel
  //
  // Topology under test:
  //   starter → router-1 → { function-2 | condition-1 }
  //   condition-1 -(if)→ function-4
  //   condition-1 -(else)→ parallel-block
  //   parallel-block -(parallel-start-source)→ agent-inside-parallel
  workflow = {
    version: '2.0',
    blocks: [
      {
        id: 'starter',
        position: { x: 0, y: 0 },
        metadata: { id: BlockType.STARTER, name: 'Start' },
        config: { tool: BlockType.STARTER, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'router-1',
        position: { x: 100, y: 0 },
        metadata: { id: BlockType.ROUTER, name: 'Router 1' },
        config: { tool: BlockType.ROUTER, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'function-2',
        position: { x: 200, y: -100 },
        metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'condition-1',
        position: { x: 200, y: 100 },
        metadata: { id: BlockType.CONDITION, name: 'Condition 1' },
        config: { tool: BlockType.CONDITION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'function-4',
        position: { x: 350, y: 50 },
        metadata: { id: BlockType.FUNCTION, name: 'Function 4' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'parallel-block',
        position: { x: 350, y: 150 },
        metadata: { id: BlockType.PARALLEL, name: 'Parallel Block' },
        config: { tool: BlockType.PARALLEL, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'agent-inside-parallel',
        position: { x: 450, y: 150 },
        metadata: { id: BlockType.AGENT, name: 'Agent Inside Parallel' },
        config: { tool: BlockType.AGENT, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
    ],
    connections: [
      { source: 'starter', target: 'router-1' },
      { source: 'router-1', target: 'function-2' },
      { source: 'router-1', target: 'condition-1' },
      // Condition branch handles carry the condition id plus -if / -else.
      {
        source: 'condition-1',
        target: 'function-4',
        sourceHandle: 'condition-b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-if',
      },
      {
        source: 'condition-1',
        target: 'parallel-block',
        sourceHandle: 'condition-b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-else',
      },
      // Internal edge into the parallel body; managed by the parallel
      // handler, not by normal path activation.
      {
        source: 'parallel-block',
        target: 'agent-inside-parallel',
        sourceHandle: 'parallel-start-source',
      },
    ],
    loops: {},
    parallels: {
      'parallel-block': {
        id: 'parallel-block',
        nodes: ['agent-inside-parallel'],
        distribution: ['item1', 'item2'],
      },
    },
  }
  pathTracker = new PathTracker(workflow)
  // Minimal ExecutionContext with empty state; individual tests populate
  // blockStates / executedBlocks as they simulate execution steps.
  mockContext = {
    workflowId: 'test-workflow',
    blockStates: new Map(),
    blockLogs: [],
    metadata: { duration: 0 },
    environmentVariables: {},
    decisions: { router: new Map(), condition: new Map() },
    loopIterations: new Map(),
    loopItems: new Map(),
    completedLoops: new Set(),
    executedBlocks: new Set(),
    activeExecutionPath: new Set(),
    workflow,
  }
  // Initialize starter as executed and in active path
  mockContext.executedBlocks.add('starter')
  mockContext.activeExecutionPath.add('starter')
  mockContext.activeExecutionPath.add('router-1')
})
it('should handle nested routing: router selects condition, condition selects function', () => {
  // Two-hop selective activation: only the chosen branch at each decision
  // point may enter the active path; the parallel subtree stays inert.
  // Step 1: Router selects the condition path (not function-2)
  mockContext.blockStates.set('router-1', {
    output: {
      selectedPath: {
        blockId: 'condition-1',
        blockType: BlockType.CONDITION,
        blockTitle: 'Condition 1',
      },
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('router-1')
  // Update paths after router execution
  pathTracker.updateExecutionPaths(['router-1'], mockContext)
  // Verify router decision
  expect(mockContext.decisions.router.get('router-1')).toBe('condition-1')
  // After router execution, condition should be active but not function-2
  expect(mockContext.activeExecutionPath.has('condition-1')).toBe(true)
  expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)
  // CRITICAL: Parallel block should NOT be activated yet
  expect(mockContext.activeExecutionPath.has('parallel-block')).toBe(false)
  expect(mockContext.activeExecutionPath.has('agent-inside-parallel')).toBe(false)
  // Step 2: Condition executes and selects function-4 (not parallel)
  mockContext.blockStates.set('condition-1', {
    output: {
      result: 'two',
      stdout: '',
      conditionResult: true,
      selectedPath: {
        blockId: 'function-4',
        blockType: BlockType.FUNCTION,
        blockTitle: 'Function 4',
      },
      selectedConditionId: 'b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-if',
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('condition-1')
  // Update paths after condition execution
  pathTracker.updateExecutionPaths(['condition-1'], mockContext)
  // Verify condition decision
  expect(mockContext.decisions.condition.get('condition-1')).toBe(
    'b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-if'
  )
  // After condition execution, function-4 should be active
  expect(mockContext.activeExecutionPath.has('function-4')).toBe(true)
  // CRITICAL: Parallel block should still NOT be activated
  expect(mockContext.activeExecutionPath.has('parallel-block')).toBe(false)
  expect(mockContext.activeExecutionPath.has('agent-inside-parallel')).toBe(false)
})
it('should handle nested routing: router selects condition, condition selects parallel', () => {
  // Mirror of the previous test with the condition taking the else branch:
  // the parallel block itself activates, but its children do not until the
  // parallel handler runs.
  // Step 1: Router selects the condition path
  mockContext.blockStates.set('router-1', {
    output: {
      selectedPath: {
        blockId: 'condition-1',
        blockType: BlockType.CONDITION,
        blockTitle: 'Condition 1',
      },
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('router-1')
  pathTracker.updateExecutionPaths(['router-1'], mockContext)
  // Step 2: Condition executes and selects parallel-block (not function-4)
  mockContext.blockStates.set('condition-1', {
    output: {
      result: 'else',
      stdout: '',
      conditionResult: false,
      selectedPath: {
        blockId: 'parallel-block',
        blockType: BlockType.PARALLEL,
        blockTitle: 'Parallel Block',
      },
      selectedConditionId: 'b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-else',
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('condition-1')
  pathTracker.updateExecutionPaths(['condition-1'], mockContext)
  // Verify condition decision
  expect(mockContext.decisions.condition.get('condition-1')).toBe(
    'b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-else'
  )
  // After condition execution, parallel-block should be active
  expect(mockContext.activeExecutionPath.has('parallel-block')).toBe(true)
  // Function-4 should NOT be activated
  expect(mockContext.activeExecutionPath.has('function-4')).toBe(false)
  // The agent inside parallel should NOT be automatically activated
  // It should only be activated when the parallel block executes
  expect(mockContext.activeExecutionPath.has('agent-inside-parallel')).toBe(false)
})
it('should prevent parallel blocks from executing when not selected by nested routing', () => {
  // This test simulates the exact scenario from the bug report
  // Step 1: Router selects condition path
  mockContext.blockStates.set('router-1', {
    output: {
      selectedPath: {
        blockId: 'condition-1',
        blockType: BlockType.CONDITION,
        blockTitle: 'Condition 1',
      },
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('router-1')
  pathTracker.updateExecutionPaths(['router-1'], mockContext)
  // Step 2: Condition selects function-4 (NOT parallel)
  mockContext.blockStates.set('condition-1', {
    output: {
      result: 'two',
      stdout: '',
      conditionResult: true,
      selectedPath: {
        blockId: 'function-4',
        blockType: BlockType.FUNCTION,
        blockTitle: 'Function 4',
      },
      selectedConditionId: 'b8f0a33c-a57f-4a36-ac7a-dc9f2b5e6c07-if',
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('condition-1')
  pathTracker.updateExecutionPaths(['condition-1'], mockContext)
  // Step 3: Simulate what the executor's getNextExecutionLayer would do
  // (executable = active but not yet executed).
  const blocksToExecute = workflow.blocks.filter(
    (block) =>
      mockContext.activeExecutionPath.has(block.id) && !mockContext.executedBlocks.has(block.id)
  )
  const blockIds = blocksToExecute.map((b) => b.id)
  // Should only include function-4, NOT parallel-block
  expect(blockIds).toContain('function-4')
  expect(blockIds).not.toContain('parallel-block')
  expect(blockIds).not.toContain('agent-inside-parallel')
  // Verify that parallel block is not in active path
  expect(mockContext.activeExecutionPath.has('parallel-block')).toBe(false)
  // Verify that isInActivePath also returns false for parallel block
  const isParallelActive = pathTracker.isInActivePath('parallel-block', mockContext)
  expect(isParallelActive).toBe(false)
})
})

View File

@@ -1,131 +0,0 @@
import { describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { Routing } from '@/executor/routing/routing'
describe('Parallel Activation Integration - shouldSkipConnection behavior', () => {
describe('Regular blocks can activate parallel/loop blocks', () => {
it('should allow Agent → Parallel connections', () => {
// This was the original bug - agent couldn't activate parallel
expect(Routing.shouldSkipConnection(undefined, BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldSkipConnection('source', BlockType.PARALLEL)).toBe(false)
})
it('should allow Function → Parallel connections', () => {
expect(Routing.shouldSkipConnection(undefined, BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldSkipConnection('source', BlockType.PARALLEL)).toBe(false)
})
it('should allow API → Loop connections', () => {
expect(Routing.shouldSkipConnection(undefined, BlockType.LOOP)).toBe(false)
expect(Routing.shouldSkipConnection('source', BlockType.LOOP)).toBe(false)
})
it('should allow all regular blocks to activate parallel/loop', () => {
const regularBlocks = [
BlockType.FUNCTION,
BlockType.AGENT,
BlockType.API,
BlockType.EVALUATOR,
BlockType.RESPONSE,
BlockType.WORKFLOW,
]
regularBlocks.forEach((sourceBlockType) => {
expect(Routing.shouldSkipConnection(undefined, BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldSkipConnection(undefined, BlockType.LOOP)).toBe(false)
})
})
})
describe('✅ Still works: Router and Condition blocks can activate parallel/loop', () => {
it('should allow Router → Parallel connections', () => {
expect(Routing.shouldSkipConnection(undefined, BlockType.PARALLEL)).toBe(false)
})
it('should allow Condition → Parallel connections', () => {
expect(Routing.shouldSkipConnection(undefined, BlockType.PARALLEL)).toBe(false)
})
})
describe('✅ Still blocked: Internal flow control connections', () => {
it('should block parallel-start-source connections during selective activation', () => {
expect(Routing.shouldSkipConnection('parallel-start-source', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('parallel-start-source', BlockType.AGENT)).toBe(true)
})
it('should block parallel-end-source connections during selective activation', () => {
expect(Routing.shouldSkipConnection('parallel-end-source', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('parallel-end-source', BlockType.AGENT)).toBe(true)
})
it('should block loop-start-source connections during selective activation', () => {
expect(Routing.shouldSkipConnection('loop-start-source', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('loop-start-source', BlockType.AGENT)).toBe(true)
})
it('should block loop-end-source connections during selective activation', () => {
expect(Routing.shouldSkipConnection('loop-end-source', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('loop-end-source', BlockType.AGENT)).toBe(true)
})
})
describe('✅ Still blocked: Condition-specific connections during selective activation', () => {
it('should block condition-specific connections during selective activation', () => {
expect(Routing.shouldSkipConnection('condition-test-if', BlockType.FUNCTION)).toBe(true)
expect(Routing.shouldSkipConnection('condition-test-else', BlockType.AGENT)).toBe(true)
expect(Routing.shouldSkipConnection('condition-some-id', BlockType.PARALLEL)).toBe(true)
})
})
describe('✅ Still works: Regular connections', () => {
it('should allow regular connections between regular blocks', () => {
expect(Routing.shouldSkipConnection(undefined, BlockType.FUNCTION)).toBe(false)
expect(Routing.shouldSkipConnection('source', BlockType.AGENT)).toBe(false)
expect(Routing.shouldSkipConnection('output', BlockType.API)).toBe(false)
})
it('should allow regular connections with any source handle (except blocked ones)', () => {
expect(Routing.shouldSkipConnection('result', BlockType.FUNCTION)).toBe(false)
expect(Routing.shouldSkipConnection('output', BlockType.AGENT)).toBe(false)
expect(Routing.shouldSkipConnection('data', BlockType.PARALLEL)).toBe(false)
})
})
})
describe('Real-world workflow scenarios', () => {
  // Every scenario below reduces to the same primitive question: can a plain
  // (handle-less) connection activate a parallel or loop block? The helper
  // phrases that positively; its parameter type is derived from the routing
  // API so it tracks signature changes automatically.
  const canActivate = (target: Parameters<typeof Routing.shouldSkipConnection>[1]) =>
    !Routing.shouldSkipConnection(undefined, target)

  describe('✅ Working: User workflows', () => {
    it('should support: Start → Agent → Parallel → Agent pattern', () => {
      // This is the user's exact workflow pattern that was broken
      expect(canActivate(BlockType.PARALLEL)).toBe(true)
    })
    it('should support: Start → Function → Loop → Function pattern', () => {
      expect(canActivate(BlockType.LOOP)).toBe(true)
    })
    it('should support: Start → API → Parallel → Multiple Agents pattern', () => {
      expect(canActivate(BlockType.PARALLEL)).toBe(true)
    })
    it('should support: Start → Evaluator → Parallel → Response pattern', () => {
      expect(canActivate(BlockType.PARALLEL)).toBe(true)
    })
  })

  describe('✅ Working: Complex routing patterns', () => {
    it('should support: Start → Router → Parallel → Function (existing working pattern)', () => {
      // This already worked before the fix
      expect(canActivate(BlockType.PARALLEL)).toBe(true)
    })
    it('should support: Start → Condition → Parallel → Agent (existing working pattern)', () => {
      // This already worked before the fix
      expect(canActivate(BlockType.PARALLEL)).toBe(true)
    })
    it('should support: Start → Router → Function → Parallel → Agent (new working pattern)', () => {
      // Router selects function, function activates parallel
      expect(canActivate(BlockType.PARALLEL)).toBe(true)
    })
  })
})

View File

@@ -1,545 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { PathTracker } from '@/executor/path/path'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Parallel Block Activation Regression Tests', () => {
let pathTracker: PathTracker
let mockContext: ExecutionContext
// Builds a pristine ExecutionContext around the given workflow. All state
// containers start empty except the active path, which is seeded with the
// 'start' block so path updates have a root to propagate from.
const createMockContext = (workflow: SerializedWorkflow): ExecutionContext => {
  const activePath = new Set<string>()
  activePath.add('start')
  return {
    workflowId: 'test-workflow',
    blockStates: new Map(),
    blockLogs: [],
    metadata: { duration: 0 },
    environmentVariables: {},
    decisions: { router: new Map(), condition: new Map() },
    loopIterations: new Map(),
    loopItems: new Map(),
    executedBlocks: new Set(),
    activeExecutionPath: activePath,
    completedLoops: new Set(),
    workflow,
  }
}
describe('Original Bug: Agent → Parallel should work', () => {
  beforeEach(() => {
    // The exact scenario from the user's non-working workflow
    // Topology: start → agent-1 → parallel-1 -(parallel-start-source)→ agent-2
    const workflow: SerializedWorkflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          metadata: { id: BlockType.STARTER, name: 'Start' },
          position: { x: 0, y: 0 },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'agent-1',
          metadata: { id: BlockType.AGENT, name: 'Agent 1' },
          position: { x: 200, y: 0 },
          config: { tool: BlockType.AGENT, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'parallel-1',
          metadata: { id: BlockType.PARALLEL, name: 'Parallel 1' },
          position: { x: 400, y: 0 },
          config: { tool: BlockType.PARALLEL, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'agent-2',
          metadata: { id: BlockType.AGENT, name: 'Agent 2' },
          position: { x: 600, y: 0 },
          config: { tool: BlockType.AGENT, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'agent-1' },
        { source: 'agent-1', target: 'parallel-1' }, // This was broken!
        { source: 'parallel-1', target: 'agent-2', sourceHandle: 'parallel-start-source' },
      ],
      loops: {},
      parallels: {
        'parallel-1': {
          id: 'parallel-1',
          nodes: ['agent-2'],
          count: 3,
          parallelType: 'count',
        },
      },
    }
    pathTracker = new PathTracker(workflow)
    mockContext = createMockContext(workflow)
  })
  it('should allow agent to activate parallel block', () => {
    // Agent 1 executes successfully
    mockContext.blockStates.set('agent-1', {
      output: { content: 'Agent response', usage: { tokens: 100 } },
      executed: true,
      executionTime: 1000,
    })
    mockContext.executedBlocks.add('agent-1')
    mockContext.activeExecutionPath.add('agent-1')
    // Update paths after agent execution
    pathTracker.updateExecutionPaths(['agent-1'], mockContext)
    // ✅ The parallel block should be activated
    expect(mockContext.activeExecutionPath.has('parallel-1')).toBe(true)
  })
  it('should not activate parallel-start-source connections during path updates', () => {
    // Set up parallel block as executed
    mockContext.blockStates.set('parallel-1', {
      output: { parallelId: 'parallel-1', parallelCount: 3, started: true },
      executed: true,
      executionTime: 100,
    })
    mockContext.executedBlocks.add('parallel-1')
    mockContext.activeExecutionPath.add('parallel-1')
    // Update paths after parallel execution
    pathTracker.updateExecutionPaths(['parallel-1'], mockContext)
    // ✅ The child agent should NOT be activated via PathTracker (parallel handler manages this)
    expect(mockContext.activeExecutionPath.has('agent-2')).toBe(false)
  })
})
describe('Regression: Router → Parallel should still work', () => {
  beforeEach(() => {
    // The working scenario that should continue to work
    // Topology: start → router-1 → parallel-1 -(parallel-start-source)→ function-1
    const workflow: SerializedWorkflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          metadata: { id: BlockType.STARTER, name: 'Start' },
          position: { x: 0, y: 0 },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'router-1',
          metadata: { id: BlockType.ROUTER, name: 'Router 1' },
          position: { x: 200, y: 0 },
          config: { tool: BlockType.ROUTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'parallel-1',
          metadata: { id: BlockType.PARALLEL, name: 'Parallel 1' },
          position: { x: 400, y: 0 },
          config: { tool: BlockType.PARALLEL, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-1',
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          position: { x: 600, y: 0 },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'router-1' },
        { source: 'router-1', target: 'parallel-1' },
        { source: 'parallel-1', target: 'function-1', sourceHandle: 'parallel-start-source' },
      ],
      loops: {},
      parallels: {
        'parallel-1': {
          id: 'parallel-1',
          nodes: ['function-1'],
          count: 2,
          parallelType: 'count',
        },
      },
    }
    pathTracker = new PathTracker(workflow)
    mockContext = createMockContext(workflow)
  })
  it('should allow router to activate parallel block', () => {
    // Router executes and selects parallel
    mockContext.blockStates.set('router-1', {
      output: {
        selectedPath: { blockId: 'parallel-1', blockType: BlockType.PARALLEL },
        reasoning: 'Going to parallel',
      },
      executed: true,
      executionTime: 500,
    })
    mockContext.executedBlocks.add('router-1')
    mockContext.activeExecutionPath.add('router-1')
    // Update paths after router execution
    pathTracker.updateExecutionPaths(['router-1'], mockContext)
    // ✅ Router should activate parallel block
    expect(mockContext.activeExecutionPath.has('parallel-1')).toBe(true)
  })
})
describe('Regression: Condition → Parallel should still work', () => {
  beforeEach(() => {
    // Topology: start → condition-1 -(if)→ parallel-1 -(start-source)→ agent-1
    //                   condition-1 -(else)→ function-1
    const workflow: SerializedWorkflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          metadata: { id: BlockType.STARTER, name: 'Start' },
          position: { x: 0, y: 0 },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'condition-1',
          metadata: { id: BlockType.CONDITION, name: 'Condition 1' },
          position: { x: 200, y: 0 },
          config: { tool: BlockType.CONDITION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'parallel-1',
          metadata: { id: BlockType.PARALLEL, name: 'Parallel 1' },
          position: { x: 400, y: 0 },
          config: { tool: BlockType.PARALLEL, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-1',
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          position: { x: 400, y: 200 },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'agent-1',
          metadata: { id: BlockType.AGENT, name: 'Agent 1' },
          position: { x: 600, y: 0 },
          config: { tool: BlockType.AGENT, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'condition-1' },
        { source: 'condition-1', target: 'parallel-1', sourceHandle: 'condition-if' },
        { source: 'condition-1', target: 'function-1', sourceHandle: 'condition-else' },
        { source: 'parallel-1', target: 'agent-1', sourceHandle: 'parallel-start-source' },
      ],
      loops: {},
      parallels: {
        'parallel-1': {
          id: 'parallel-1',
          nodes: ['agent-1'],
          count: 2,
          parallelType: 'count',
        },
      },
    }
    pathTracker = new PathTracker(workflow)
    mockContext = createMockContext(workflow)
  })
  it('should allow condition to activate parallel block when if condition is met', () => {
    // Condition executes and selects if path (parallel)
    mockContext.blockStates.set('condition-1', {
      output: {
        selectedConditionId: 'if',
        conditionResult: true,
        selectedPath: { blockId: 'parallel-1', blockType: BlockType.PARALLEL },
      },
      executed: true,
      executionTime: 200,
    })
    mockContext.executedBlocks.add('condition-1')
    mockContext.activeExecutionPath.add('condition-1')
    // Update paths after condition execution
    pathTracker.updateExecutionPaths(['condition-1'], mockContext)
    // ✅ Condition should activate parallel block
    expect(mockContext.activeExecutionPath.has('parallel-1')).toBe(true)
    // ✅ Function should NOT be activated (else path)
    expect(mockContext.activeExecutionPath.has('function-1')).toBe(false)
  })
  it('should allow condition to activate function block when else condition is met', () => {
    // Condition executes and selects else path (function)
    mockContext.blockStates.set('condition-1', {
      output: {
        selectedConditionId: 'else',
        conditionResult: false,
        selectedPath: { blockId: 'function-1', blockType: BlockType.FUNCTION },
      },
      executed: true,
      executionTime: 200,
    })
    mockContext.executedBlocks.add('condition-1')
    mockContext.activeExecutionPath.add('condition-1')
    // Update paths after condition execution
    pathTracker.updateExecutionPaths(['condition-1'], mockContext)
    // ✅ Function should be activated (else path)
    expect(mockContext.activeExecutionPath.has('function-1')).toBe(true)
    // ✅ Parallel should NOT be activated (if path)
    expect(mockContext.activeExecutionPath.has('parallel-1')).toBe(false)
  })
})
describe('Regression: All regular blocks should activate parallel/loop', () => {
  // Parameterized over every "regular" (non-routing) block type: each one,
  // placed between start and a parallel block, must be able to activate it.
  it.each([
    { blockType: BlockType.FUNCTION, name: 'Function' },
    { blockType: BlockType.AGENT, name: 'Agent' },
    { blockType: BlockType.API, name: 'API' },
    { blockType: BlockType.EVALUATOR, name: 'Evaluator' },
    { blockType: BlockType.RESPONSE, name: 'Response' },
    { blockType: BlockType.WORKFLOW, name: 'Workflow' },
  ])('should allow $name → Parallel activation', ({ blockType, name }) => {
    // Topology: start → regular-block → parallel-1 -(start-source)→ target-function
    const workflow: SerializedWorkflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          metadata: { id: BlockType.STARTER, name: 'Start' },
          position: { x: 0, y: 0 },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'regular-block',
          metadata: { id: blockType, name },
          position: { x: 200, y: 0 },
          config: { tool: blockType, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'parallel-1',
          metadata: { id: BlockType.PARALLEL, name: 'Parallel 1' },
          position: { x: 400, y: 0 },
          config: { tool: BlockType.PARALLEL, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'target-function',
          metadata: { id: BlockType.FUNCTION, name: 'Target Function' },
          position: { x: 600, y: 0 },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'regular-block' },
        { source: 'regular-block', target: 'parallel-1' },
        {
          source: 'parallel-1',
          target: 'target-function',
          sourceHandle: 'parallel-start-source',
        },
      ],
      loops: {},
      parallels: {
        'parallel-1': {
          id: 'parallel-1',
          nodes: ['target-function'],
          count: 2,
          parallelType: 'count',
        },
      },
    }
    pathTracker = new PathTracker(workflow)
    mockContext = createMockContext(workflow)
    // Regular block executes
    mockContext.blockStates.set('regular-block', {
      output: { result: 'Success' },
      executed: true,
      executionTime: 100,
    })
    mockContext.executedBlocks.add('regular-block')
    mockContext.activeExecutionPath.add('regular-block')
    // Update paths after regular block execution
    pathTracker.updateExecutionPaths(['regular-block'], mockContext)
    // ✅ The parallel block should be activated
    expect(mockContext.activeExecutionPath.has('parallel-1')).toBe(true)
  })
})
describe('Regression: Internal flow control connections should still be blocked', () => {
  it('should prevent activation of parallel-start-source connections during selective activation', () => {
    // Degenerate workflow: a single edge that (incorrectly) carries the
    // reserved parallel-start-source handle between two ordinary blocks.
    const workflow: SerializedWorkflow = {
      version: '2.0',
      blocks: [
        {
          id: 'function-1',
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          position: { x: 0, y: 0 },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'agent-1',
          metadata: { id: BlockType.AGENT, name: 'Agent 1' },
          position: { x: 200, y: 0 },
          config: { tool: BlockType.AGENT, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        // This is an internal flow control connection that should be blocked
        { source: 'function-1', target: 'agent-1', sourceHandle: 'parallel-start-source' },
      ],
      loops: {},
      parallels: {},
    }
    pathTracker = new PathTracker(workflow)
    mockContext = createMockContext(workflow)
    // Function 1 executes
    mockContext.blockStates.set('function-1', {
      output: { result: 'Success' },
      executed: true,
      executionTime: 100,
    })
    mockContext.executedBlocks.add('function-1')
    mockContext.activeExecutionPath.add('function-1')
    // Update paths after function execution
    pathTracker.updateExecutionPaths(['function-1'], mockContext)
    // ❌ Agent should NOT be activated via parallel-start-source during selective activation
    expect(mockContext.activeExecutionPath.has('agent-1')).toBe(false)
  })
})
describe('Edge Cases', () => {
  it('should handle loop blocks the same way as parallel blocks', () => {
    // Topology: start → function-1 → loop-1 -(loop-start-source)→ agent-1
    const workflow: SerializedWorkflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          metadata: { id: BlockType.STARTER, name: 'Start' },
          position: { x: 0, y: 0 },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-1',
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          position: { x: 200, y: 0 },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'loop-1',
          metadata: { id: BlockType.LOOP, name: 'Loop 1' },
          position: { x: 400, y: 0 },
          config: { tool: BlockType.LOOP, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'agent-1',
          metadata: { id: BlockType.AGENT, name: 'Agent 1' },
          position: { x: 600, y: 0 },
          config: { tool: BlockType.AGENT, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'function-1' },
        { source: 'function-1', target: 'loop-1' }, // Function → Loop should work
        { source: 'loop-1', target: 'agent-1', sourceHandle: 'loop-start-source' },
      ],
      loops: {
        'loop-1': {
          id: 'loop-1',
          nodes: ['agent-1'],
          iterations: 3,
          loopType: 'for',
        },
      },
      parallels: {},
    }
    pathTracker = new PathTracker(workflow)
    mockContext = createMockContext(workflow)
    // Function 1 executes
    mockContext.blockStates.set('function-1', {
      output: { result: 'Success' },
      executed: true,
      executionTime: 100,
    })
    mockContext.executedBlocks.add('function-1')
    mockContext.activeExecutionPath.add('function-1')
    // Update paths after function execution
    pathTracker.updateExecutionPaths(['function-1'], mockContext)
    // ✅ Function should be able to activate loop block
    expect(mockContext.activeExecutionPath.has('loop-1')).toBe(true)
  })
})
})

View File

@@ -1,206 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { ParallelBlockHandler } from '@/executor/handlers/parallel/parallel-handler'
import { PathTracker } from '@/executor/path/path'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Parallel Handler Integration with PathTracker', () => {
let workflow: SerializedWorkflow
let pathTracker: PathTracker
let parallelHandler: ParallelBlockHandler
let mockContext: ExecutionContext
beforeEach(() => {
  // Create a simplified workflow with condition → parallel scenario
  //
  // Topology:
  //   condition-1 -(condition-test-if)→ function-2
  //   condition-1 -(condition-test-else)→ parallel-2
  //   parallel-2 -(parallel-start-source)→ agent-2
  workflow = {
    version: '2.0',
    blocks: [
      {
        id: 'condition-1',
        position: { x: 0, y: 0 },
        metadata: { id: BlockType.CONDITION, name: 'Condition 1' },
        config: { tool: BlockType.CONDITION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'function-2',
        position: { x: 100, y: -50 },
        metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
        config: { tool: BlockType.FUNCTION, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'parallel-2',
        position: { x: 100, y: 50 },
        metadata: { id: BlockType.PARALLEL, name: 'Parallel 2' },
        config: { tool: BlockType.PARALLEL, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
      {
        id: 'agent-2',
        position: { x: 200, y: 50 },
        metadata: { id: BlockType.AGENT, name: 'Agent 2' },
        config: { tool: BlockType.AGENT, params: {} },
        inputs: {},
        outputs: {},
        enabled: true,
      },
    ],
    connections: [
      // Condition → Function 2 (if path)
      {
        source: 'condition-1',
        target: 'function-2',
        sourceHandle: 'condition-test-if',
      },
      // Condition → Parallel 2 (else path)
      {
        source: 'condition-1',
        target: 'parallel-2',
        sourceHandle: 'condition-test-else',
      },
      // Parallel 2 → Agent 2
      {
        source: 'parallel-2',
        target: 'agent-2',
        sourceHandle: 'parallel-start-source',
      },
    ],
    loops: {},
    parallels: {
      'parallel-2': {
        id: 'parallel-2',
        nodes: ['agent-2'],
        distribution: ['item1', 'item2'],
      },
    },
  }
  pathTracker = new PathTracker(workflow)
  // Handler is wired to the PathTracker so it can consult the active path
  // before spawning the parallel's child blocks.
  parallelHandler = new ParallelBlockHandler(undefined, pathTracker)
  mockContext = {
    workflowId: 'test-workflow',
    blockStates: new Map(),
    blockLogs: [],
    metadata: { duration: 0 },
    environmentVariables: {},
    decisions: { router: new Map(), condition: new Map() },
    loopIterations: new Map(),
    loopItems: new Map(),
    completedLoops: new Set(),
    executedBlocks: new Set(),
    activeExecutionPath: new Set(),
    workflow,
  }
})
it('should not allow parallel block to execute when not in active path', async () => {
  // Set up scenario where condition selected function-2 (if path), not parallel-2 (else path)
  mockContext.decisions.condition.set('condition-1', 'test-if')
  mockContext.executedBlocks.add('condition-1')
  mockContext.activeExecutionPath.add('condition-1')
  mockContext.activeExecutionPath.add('function-2') // Only function-2 should be active
  // Parallel-2 should NOT be in active path
  expect(mockContext.activeExecutionPath.has('parallel-2')).toBe(false)
  // Test PathTracker's isInActivePath method
  const isParallel2Active = pathTracker.isInActivePath('parallel-2', mockContext)
  expect(isParallel2Active).toBe(false)
  // Get the parallel block
  const parallelBlock = workflow.blocks.find((b) => b.id === 'parallel-2')!
  // Try to execute the parallel block
  const result = await parallelHandler.execute(parallelBlock, {}, mockContext)
  // The parallel block should execute (return started: true) but should NOT activate its children
  expect(result).toMatchObject({
    parallelId: 'parallel-2',
    started: true,
  })
  // CRITICAL: Agent 2 should NOT be activated because parallel-2 is not in active path
  expect(mockContext.activeExecutionPath.has('agent-2')).toBe(false)
})
// Positive counterpart of the previous test: when the condition selects the
// parallel branch, executing the parallel block must activate its children.
it('should allow parallel block to execute and activate children when in active path', async () => {
  // Condition picked the else-path, which routes into parallel-2
  mockContext.decisions.condition.set('condition-1', 'test-else')
  mockContext.executedBlocks.add('condition-1')
  mockContext.activeExecutionPath.add('condition-1')
  mockContext.activeExecutionPath.add('parallel-2') // Parallel-2 should be active
  // Sanity check: parallel-2 is in the active path this time
  expect(mockContext.activeExecutionPath.has('parallel-2')).toBe(true)
  // PathTracker must agree that parallel-2 is active
  const isParallel2Active = pathTracker.isInActivePath('parallel-2', mockContext)
  expect(isParallel2Active).toBe(true)
  // Get the parallel block definition from the workflow
  const parallelBlock = workflow.blocks.find((b) => b.id === 'parallel-2')!
  // Execute the parallel block while it is on the active path
  const result = await parallelHandler.execute(parallelBlock, {}, mockContext)
  // Handler reports the parallel subflow as started
  expect(result).toMatchObject({
    parallelId: 'parallel-2',
    started: true,
  })
  // Agent 2 must now be activated, because parallel-2 was on the active path
  expect(mockContext.activeExecutionPath.has('agent-2')).toBe(true)
})
// End-to-end regression for the routing bug: drives the path update through
// PathTracker.updateExecutionPaths (rather than seeding decisions manually)
// and confirms the off-path parallel never activates its children.
it('should test the routing failure scenario with parallel block', async () => {
  // Step 1: record condition-1's output as having selected Function 2 (if path).
  // NOTE(review): updateExecutionPaths appears to derive the condition decision
  // from this block state (selectedConditionId) — confirmed below by the
  // decisions.condition assertion.
  mockContext.blockStates.set('condition-1', {
    output: {
      result: 'one',
      stdout: '',
      conditionResult: true,
      selectedPath: {
        blockId: 'function-2',
        blockType: 'function',
        blockTitle: 'Function 2',
      },
      selectedConditionId: 'test-if',
    },
    executed: true,
    executionTime: 0,
  })
  mockContext.executedBlocks.add('condition-1')
  mockContext.activeExecutionPath.add('condition-1')
  // Propagate path state after the condition "executed"
  pathTracker.updateExecutionPaths(['condition-1'], mockContext)
  // The tracker must have recorded the if-path decision and activated only
  // the selected target (function-2), not the else-path parallel.
  expect(mockContext.decisions.condition.get('condition-1')).toBe('test-if')
  expect(mockContext.activeExecutionPath.has('function-2')).toBe(true)
  expect(mockContext.activeExecutionPath.has('parallel-2')).toBe(false)
  // Step 2: execute the off-path parallel block anyway
  const parallelBlock = workflow.blocks.find((b) => b.id === 'parallel-2')!
  const result = await parallelHandler.execute(parallelBlock, {}, mockContext)
  // The handler itself reports started: true, but activation must not leak
  expect(result).toMatchObject({
    parallelId: 'parallel-2',
    started: true,
  })
  // CRITICAL: Agent 2 (child of the off-path parallel) must stay inactive
  expect(mockContext.activeExecutionPath.has('agent-2')).toBe(false)
})
})

View File

@@ -1,318 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { PathTracker } from '@/executor/path/path'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Router and Condition Block Path Selection in Complex Workflows', () => {
let workflow: SerializedWorkflow
let pathTracker: PathTracker
let mockContext: ExecutionContext
beforeEach(() => {
workflow = {
version: '2.0',
blocks: [
{
id: 'bd9f4f7d-8aed-4860-a3be-8bebd1931b19',
position: { x: 0, y: 0 },
metadata: { id: BlockType.STARTER, name: 'Start' },
config: { tool: BlockType.STARTER, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'f29a40b7-125a-45a7-a670-af14a1498f94',
position: { x: 100, y: 0 },
metadata: { id: BlockType.ROUTER, name: 'Router 1' },
config: { tool: BlockType.ROUTER, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
position: { x: 200, y: -50 },
metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
config: { tool: BlockType.FUNCTION, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'a62902db-fd8d-4851-aa88-acd5e7667497',
position: { x: 200, y: 50 },
metadata: { id: BlockType.PARALLEL, name: 'Parallel 1' },
config: { tool: BlockType.PARALLEL, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: '0494cf56-2520-4e29-98ad-313ea55cf142',
position: { x: 300, y: -50 },
metadata: { id: BlockType.CONDITION, name: 'Condition 1' },
config: { tool: BlockType.CONDITION, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: '033ea142-3002-4a68-9e12-092b10b8c9c8',
position: { x: 400, y: -100 },
metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
config: { tool: BlockType.FUNCTION, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: '037140a8-fda3-44e2-896c-6adea53ea30f',
position: { x: 400, y: 0 },
metadata: { id: BlockType.PARALLEL, name: 'Parallel 2' },
config: { tool: BlockType.PARALLEL, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'a91e3a02-b884-4823-8197-30ae498ac94c',
position: { x: 300, y: 100 },
metadata: { id: BlockType.AGENT, name: 'Agent 1' },
config: { tool: BlockType.AGENT, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: '97974a42-cdf4-4810-9caa-b5e339f42ab0',
position: { x: 500, y: 0 },
metadata: { id: BlockType.AGENT, name: 'Agent 2' },
config: { tool: BlockType.AGENT, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
],
connections: [
// Start → Router 1
{
source: 'bd9f4f7d-8aed-4860-a3be-8bebd1931b19',
target: 'f29a40b7-125a-45a7-a670-af14a1498f94',
},
// Router 1 → Function 1
{
source: 'f29a40b7-125a-45a7-a670-af14a1498f94',
target: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
},
// Router 1 → Parallel 1
{
source: 'f29a40b7-125a-45a7-a670-af14a1498f94',
target: 'a62902db-fd8d-4851-aa88-acd5e7667497',
},
// Function 1 → Condition 1
{
source: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
target: '0494cf56-2520-4e29-98ad-313ea55cf142',
},
// Condition 1 → Function 2 (if path)
{
source: '0494cf56-2520-4e29-98ad-313ea55cf142',
target: '033ea142-3002-4a68-9e12-092b10b8c9c8',
sourceHandle: 'condition-0494cf56-2520-4e29-98ad-313ea55cf142-if',
},
// Condition 1 → Parallel 2 (else path)
{
source: '0494cf56-2520-4e29-98ad-313ea55cf142',
target: '037140a8-fda3-44e2-896c-6adea53ea30f',
sourceHandle: 'condition-0494cf56-2520-4e29-98ad-313ea55cf142-else',
},
// Parallel 1 → Agent 1
{
source: 'a62902db-fd8d-4851-aa88-acd5e7667497',
target: 'a91e3a02-b884-4823-8197-30ae498ac94c',
sourceHandle: 'parallel-start-source',
},
// Parallel 2 → Agent 2
{
source: '037140a8-fda3-44e2-896c-6adea53ea30f',
target: '97974a42-cdf4-4810-9caa-b5e339f42ab0',
sourceHandle: 'parallel-start-source',
},
],
loops: {},
parallels: {
'a62902db-fd8d-4851-aa88-acd5e7667497': {
id: 'a62902db-fd8d-4851-aa88-acd5e7667497',
nodes: ['a91e3a02-b884-4823-8197-30ae498ac94c'],
distribution: ['item1', 'item2'],
},
'037140a8-fda3-44e2-896c-6adea53ea30f': {
id: '037140a8-fda3-44e2-896c-6adea53ea30f',
nodes: ['97974a42-cdf4-4810-9caa-b5e339f42ab0'],
distribution: ['item1', 'item2'],
},
},
}
pathTracker = new PathTracker(workflow)
mockContext = {
workflowId: 'test-workflow',
blockStates: new Map(),
blockLogs: [],
metadata: { duration: 0 },
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopIterations: new Map(),
loopItems: new Map(),
completedLoops: new Set(),
executedBlocks: new Set(),
activeExecutionPath: new Set(),
workflow,
}
// Initialize execution state
mockContext.executedBlocks.add('bd9f4f7d-8aed-4860-a3be-8bebd1931b19') // Start
mockContext.activeExecutionPath.add('bd9f4f7d-8aed-4860-a3be-8bebd1931b19') // Start
mockContext.activeExecutionPath.add('f29a40b7-125a-45a7-a670-af14a1498f94') // Router 1
})
it('should reproduce the exact router and condition block path selection scenario', () => {
// Step 1: Router 1 executes and selects Function 1 (not Parallel 1)
mockContext.blockStates.set('f29a40b7-125a-45a7-a670-af14a1498f94', {
output: {
selectedPath: {
blockId: 'd09b0a90-2c59-4a2c-af15-c30321e36d9b',
blockType: BlockType.FUNCTION,
blockTitle: 'Function 1',
},
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('f29a40b7-125a-45a7-a670-af14a1498f94')
pathTracker.updateExecutionPaths(['f29a40b7-125a-45a7-a670-af14a1498f94'], mockContext)
// Verify router selected Function 1
expect(mockContext.decisions.router.get('f29a40b7-125a-45a7-a670-af14a1498f94')).toBe(
'd09b0a90-2c59-4a2c-af15-c30321e36d9b'
)
expect(mockContext.activeExecutionPath.has('d09b0a90-2c59-4a2c-af15-c30321e36d9b')).toBe(true) // Function 1
// Parallel 1 should NOT be in active path (not selected by router)
expect(mockContext.activeExecutionPath.has('a62902db-fd8d-4851-aa88-acd5e7667497')).toBe(false) // Parallel 1
expect(mockContext.activeExecutionPath.has('a91e3a02-b884-4823-8197-30ae498ac94c')).toBe(false) // Agent 1
// Step 2: Function 1 executes and returns "one"
mockContext.blockStates.set('d09b0a90-2c59-4a2c-af15-c30321e36d9b', {
output: {
result: 'one',
stdout: '',
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('d09b0a90-2c59-4a2c-af15-c30321e36d9b')
pathTracker.updateExecutionPaths(['d09b0a90-2c59-4a2c-af15-c30321e36d9b'], mockContext)
// Function 1 should activate Condition 1
expect(mockContext.activeExecutionPath.has('0494cf56-2520-4e29-98ad-313ea55cf142')).toBe(true) // Condition 1
// Parallel 2 should NOT be in active path yet
expect(mockContext.activeExecutionPath.has('037140a8-fda3-44e2-896c-6adea53ea30f')).toBe(false) // Parallel 2
expect(mockContext.activeExecutionPath.has('97974a42-cdf4-4810-9caa-b5e339f42ab0')).toBe(false) // Agent 2
// Step 3: Condition 1 executes and selects Function 2 (if path, not else/parallel path)
mockContext.blockStates.set('0494cf56-2520-4e29-98ad-313ea55cf142', {
output: {
result: 'one',
stdout: '',
conditionResult: true,
selectedPath: {
blockId: '033ea142-3002-4a68-9e12-092b10b8c9c8',
blockType: BlockType.FUNCTION,
blockTitle: 'Function 2',
},
selectedConditionId: '0494cf56-2520-4e29-98ad-313ea55cf142-if',
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('0494cf56-2520-4e29-98ad-313ea55cf142')
pathTracker.updateExecutionPaths(['0494cf56-2520-4e29-98ad-313ea55cf142'], mockContext)
// Verify condition selected the if path (Function 2)
expect(mockContext.decisions.condition.get('0494cf56-2520-4e29-98ad-313ea55cf142')).toBe(
'0494cf56-2520-4e29-98ad-313ea55cf142-if'
)
expect(mockContext.activeExecutionPath.has('033ea142-3002-4a68-9e12-092b10b8c9c8')).toBe(true) // Function 2
// CRITICAL: Parallel 2 should NOT be in active path (condition selected if, not else)
expect(mockContext.activeExecutionPath.has('037140a8-fda3-44e2-896c-6adea53ea30f')).toBe(false) // Parallel 2
expect(mockContext.activeExecutionPath.has('97974a42-cdf4-4810-9caa-b5e339f42ab0')).toBe(false) // Agent 2
// Step 4: Function 2 executes (this should be the end of the workflow)
mockContext.blockStates.set('033ea142-3002-4a68-9e12-092b10b8c9c8', {
output: {
result: 'two',
stdout: '',
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('033ea142-3002-4a68-9e12-092b10b8c9c8')
pathTracker.updateExecutionPaths(['033ea142-3002-4a68-9e12-092b10b8c9c8'], mockContext)
// Final verification: Parallel 2 and Agent 2 should NEVER be in active path
expect(mockContext.activeExecutionPath.has('037140a8-fda3-44e2-896c-6adea53ea30f')).toBe(false) // Parallel 2
expect(mockContext.activeExecutionPath.has('97974a42-cdf4-4810-9caa-b5e339f42ab0')).toBe(false) // Agent 2
// Simulate what executor's getNextExecutionLayer would return
const blocksToExecute = workflow.blocks.filter(
(block) =>
mockContext.activeExecutionPath.has(block.id) && !mockContext.executedBlocks.has(block.id)
)
const blockIds = blocksToExecute.map((b) => b.id)
// Should be empty (no more blocks to execute)
expect(blockIds).toHaveLength(0)
// Should NOT include Parallel 2 or Agent 2
expect(blockIds).not.toContain('037140a8-fda3-44e2-896c-6adea53ea30f') // Parallel 2
expect(blockIds).not.toContain('97974a42-cdf4-4810-9caa-b5e339f42ab0') // Agent 2
})
it('should test the isInActivePath method for Parallel 2', () => {
// Set up the same execution state as above
mockContext.executedBlocks.add('f29a40b7-125a-45a7-a670-af14a1498f94') // Router 1
mockContext.executedBlocks.add('d09b0a90-2c59-4a2c-af15-c30321e36d9b') // Function 1
mockContext.executedBlocks.add('0494cf56-2520-4e29-98ad-313ea55cf142') // Condition 1
// Set router decision
mockContext.decisions.router.set(
'f29a40b7-125a-45a7-a670-af14a1498f94',
'd09b0a90-2c59-4a2c-af15-c30321e36d9b'
)
// Set condition decision to if path (not else path)
mockContext.decisions.condition.set(
'0494cf56-2520-4e29-98ad-313ea55cf142',
'0494cf56-2520-4e29-98ad-313ea55cf142-if'
)
// Test isInActivePath for Parallel 2
const isParallel2Active = pathTracker.isInActivePath(
'037140a8-fda3-44e2-896c-6adea53ea30f',
mockContext
)
// Should be false because condition selected if path, not else path
expect(isParallel2Active).toBe(false)
})
})

View File

@@ -1,305 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { PathTracker } from '@/executor/path/path'
import { Routing } from '@/executor/routing/routing'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Router → Workflow Block Execution Fix', () => {
let workflow: SerializedWorkflow
let pathTracker: PathTracker
let mockContext: ExecutionContext
beforeEach(() => {
workflow = {
version: '2.0',
blocks: [
{
id: 'starter',
position: { x: 0, y: 0 },
metadata: { id: BlockType.STARTER, name: 'Start' },
config: { tool: BlockType.STARTER, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'router-1',
position: { x: 100, y: 0 },
metadata: { id: BlockType.ROUTER, name: 'Router 1' },
config: { tool: BlockType.ROUTER, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'function-1',
position: { x: 200, y: -100 },
metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
config: { tool: BlockType.FUNCTION, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'router-2',
position: { x: 200, y: 0 },
metadata: { id: BlockType.ROUTER, name: 'Router 2' },
config: { tool: BlockType.ROUTER, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'function-2',
position: { x: 300, y: -50 },
metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
config: { tool: BlockType.FUNCTION, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'workflow-2',
position: { x: 300, y: 50 },
metadata: { id: BlockType.WORKFLOW, name: 'Workflow 2' },
config: { tool: BlockType.WORKFLOW, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
],
connections: [
{ source: 'starter', target: 'router-1' },
{ source: 'router-1', target: 'function-1' },
{ source: 'router-1', target: 'router-2' },
{ source: 'router-2', target: 'function-2' },
{ source: 'router-2', target: 'workflow-2' },
],
loops: {},
parallels: {},
}
pathTracker = new PathTracker(workflow)
mockContext = {
workflowId: 'test-workflow',
blockStates: new Map(),
blockLogs: [],
metadata: { duration: 0 },
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopIterations: new Map(),
loopItems: new Map(),
completedLoops: new Set(),
executedBlocks: new Set(),
activeExecutionPath: new Set(),
workflow,
}
// Initialize starter as executed and in active path
mockContext.executedBlocks.add('starter')
mockContext.activeExecutionPath.add('starter')
mockContext.activeExecutionPath.add('router-1')
})
it('should categorize workflow blocks as flow control blocks requiring active path checks', () => {
// Verify that workflow blocks now have the correct routing behavior
expect(Routing.getCategory(BlockType.WORKFLOW)).toBe('flow-control')
expect(Routing.requiresActivePathCheck(BlockType.WORKFLOW)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.WORKFLOW)).toBe(true)
})
it('should prevent workflow blocks from executing when not selected by router', () => {
// This test recreates the exact bug scenario from the CSV data
// Step 1: Router 1 selects router-2 (not function-1)
mockContext.blockStates.set('router-1', {
output: {
selectedPath: {
blockId: 'router-2',
blockType: BlockType.ROUTER,
blockTitle: 'Router 2',
},
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('router-1')
// Update paths after router execution
pathTracker.updateExecutionPaths(['router-1'], mockContext)
// Verify router decision
expect(mockContext.decisions.router.get('router-1')).toBe('router-2')
// After router-1 execution, router-2 should be active but not function-1
expect(mockContext.activeExecutionPath.has('router-2')).toBe(true)
expect(mockContext.activeExecutionPath.has('function-1')).toBe(false)
// CRITICAL: Workflow block should NOT be activated yet
expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
// Step 2: Router 2 selects function-2 (NOT workflow-2)
mockContext.blockStates.set('router-2', {
output: {
selectedPath: {
blockId: 'function-2',
blockType: BlockType.FUNCTION,
blockTitle: 'Function 2',
},
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('router-2')
// Update paths after router-2 execution
pathTracker.updateExecutionPaths(['router-2'], mockContext)
// Verify router-2 decision
expect(mockContext.decisions.router.get('router-2')).toBe('function-2')
// After router-2 execution, function-2 should be active
expect(mockContext.activeExecutionPath.has('function-2')).toBe(true)
// CRITICAL: Workflow block should still NOT be activated (this was the bug!)
expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
// Step 3: Simulate what the executor's getNextExecutionLayer would do
// This mimics the logic from executor/index.ts lines 991-994
const blocksToExecute = workflow.blocks.filter(
(block) =>
!mockContext.executedBlocks.has(block.id) &&
block.enabled !== false &&
mockContext.activeExecutionPath.has(block.id)
)
const blockIds = blocksToExecute.map((b) => b.id)
// Should only include function-2, NOT workflow-2
expect(blockIds).toContain('function-2')
expect(blockIds).not.toContain('workflow-2')
// Verify that workflow block is not in active path
expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
// Verify that isInActivePath also returns false for workflow block
const isWorkflowActive = pathTracker.isInActivePath('workflow-2', mockContext)
expect(isWorkflowActive).toBe(false)
})
it('should allow workflow blocks to execute when selected by router', () => {
// Test the positive case - workflow block should execute when actually selected
// Step 1: Router 1 selects router-2
mockContext.blockStates.set('router-1', {
output: {
selectedPath: {
blockId: 'router-2',
blockType: BlockType.ROUTER,
blockTitle: 'Router 2',
},
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('router-1')
pathTracker.updateExecutionPaths(['router-1'], mockContext)
// Step 2: Router 2 selects workflow-2 (NOT function-2)
mockContext.blockStates.set('router-2', {
output: {
selectedPath: {
blockId: 'workflow-2',
blockType: BlockType.WORKFLOW,
blockTitle: 'Workflow 2',
},
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('router-2')
pathTracker.updateExecutionPaths(['router-2'], mockContext)
// Verify router-2 decision
expect(mockContext.decisions.router.get('router-2')).toBe('workflow-2')
// After router-2 execution, workflow-2 should be active
expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(true)
// Function-2 should NOT be activated
expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)
// Step 3: Verify workflow block would be included in next execution layer
const blocksToExecute = workflow.blocks.filter(
(block) =>
!mockContext.executedBlocks.has(block.id) &&
block.enabled !== false &&
mockContext.activeExecutionPath.has(block.id)
)
const blockIds = blocksToExecute.map((b) => b.id)
// Should include workflow-2, NOT function-2
expect(blockIds).toContain('workflow-2')
expect(blockIds).not.toContain('function-2')
})
it('should handle multiple sequential routers with workflow blocks correctly', () => {
// This test ensures the fix works with the exact scenario from the bug report:
// "The issue only seems to happen when there are multiple routing/conditional blocks"
// Simulate the exact execution order from the CSV:
// Router 1 → Function 1, Router 2 → Function 2, but Workflow 2 executed anyway
// Step 1: Router 1 selects function-1 (not router-2)
mockContext.blockStates.set('router-1', {
output: {
selectedPath: {
blockId: 'function-1',
blockType: BlockType.FUNCTION,
blockTitle: 'Function 1',
},
},
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('router-1')
pathTracker.updateExecutionPaths(['router-1'], mockContext)
// After router-1, only function-1 should be active
expect(mockContext.activeExecutionPath.has('function-1')).toBe(true)
expect(mockContext.activeExecutionPath.has('router-2')).toBe(false)
expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
// Step 2: Execute function-1
mockContext.blockStates.set('function-1', {
output: { result: 'hi', stdout: '' },
executed: true,
executionTime: 0,
})
mockContext.executedBlocks.add('function-1')
// Step 3: Check what blocks would be available for next execution
const blocksToExecute = workflow.blocks.filter(
(block) =>
!mockContext.executedBlocks.has(block.id) &&
block.enabled !== false &&
mockContext.activeExecutionPath.has(block.id)
)
const blockIds = blocksToExecute.map((b) => b.id)
// CRITICAL: Neither router-2 nor workflow-2 should be eligible for execution
// because they were not selected by router-1
expect(blockIds).not.toContain('router-2')
expect(blockIds).not.toContain('workflow-2')
expect(blockIds).not.toContain('function-2')
// Verify none of the unselected blocks are in active path
expect(mockContext.activeExecutionPath.has('router-2')).toBe(false)
expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)
})
})

View File

@@ -73,6 +73,9 @@ export interface BlockLog {
output?: any // Output data from successful execution
input?: any // Input data for the block execution
error?: string // Error message if execution failed
loopId?: string // Loop ID if this block is part of a loop
parallelId?: string // Parallel ID if this block is part of a parallel
iterationIndex?: number // Iteration number for loop/parallel blocks
}
/**
@@ -107,7 +110,12 @@ export interface ExecutionContext {
// Whether this execution is running against deployed state (API/webhook/schedule/chat)
// Manual executions in the builder should leave this undefined/false
isDeployedContext?: boolean
// CONSOLIDATED STATE - Single source of truth for execution state
// Uses shared references with ExecutionState class
blockStates: Map<string, BlockState>
executedBlocks: Set<string> // Set of block IDs that have been executed
blockLogs: BlockLog[] // Chronological log of block executions
metadata: ExecutionMetadata // Timing metadata for the execution
environmentVariables: Record<string, string> // Environment variables available during execution
@@ -162,8 +170,6 @@ export interface ExecutionContext {
// Current virtual block being executed (for parallel iterations)
currentVirtualBlockId?: string
// Execution tracking
executedBlocks: Set<string> // Set of block IDs that have been executed
activeExecutionPath: Set<string> // Set of block IDs in the current execution path
workflow?: SerializedWorkflow // Reference to the workflow being executed
@@ -174,8 +180,14 @@ export interface ExecutionContext {
edges?: Array<{ source: string; target: string }> // Workflow edge connections
// New context extensions
onStream?: (streamingExecution: StreamingExecution) => Promise<string>
onBlockComplete?: (blockId: string, output: any) => Promise<void>
onStream?: (streamingExecution: StreamingExecution) => Promise<void>
onBlockStart?: (blockId: string, blockName: string, blockType: string) => Promise<void>
onBlockComplete?: (
blockId: string,
blockName: string,
blockType: string,
output: any
) => Promise<void>
}
/**
@@ -235,18 +247,10 @@ export interface BlockHandler {
*/
canHandle(block: SerializedBlock): boolean
/**
* Executes the block with the given inputs and context.
*
* @param block - Block to execute
* @param inputs - Resolved input parameters
* @param context - Current execution context
* @returns Block execution output or StreamingExecution for streaming
*/
execute(
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record<string, any>,
context: ExecutionContext
inputs: Record<string, any>
): Promise<BlockOutput | StreamingExecution>
}

View File

@@ -0,0 +1,9 @@
import type { SerializedLoop } from '@/serializer/types'
/**
 * A serialized loop configuration that has been enriched with the concrete
 * list of node (block) IDs contained in the loop.
 */
export interface LoopConfigWithNodes extends SerializedLoop {
  nodes: string[]
}

/**
 * Type guard: narrows a SerializedLoop to LoopConfigWithNodes when the
 * config carries a `nodes` array.
 *
 * @param config - The serialized loop configuration to inspect
 * @returns true when `config.nodes` is an array (of node IDs)
 */
export function isLoopConfigWithNodes(config: SerializedLoop): config is LoopConfigWithNodes {
  // Read through a structural Partial cast instead of `any`, so the property
  // access remains type-checked against the narrowed interface.
  return Array.isArray((config as Partial<LoopConfigWithNodes>).nodes)
}

View File

@@ -0,0 +1,11 @@
import type { SerializedParallel } from '@/serializer/types'
/**
 * A serialized parallel configuration that has been enriched with the concrete
 * list of node (block) IDs that run inside the parallel subflow.
 */
export interface ParallelConfigWithNodes extends SerializedParallel {
  nodes: string[]
}

/**
 * Type guard: narrows a SerializedParallel to ParallelConfigWithNodes when
 * the config carries a `nodes` array.
 *
 * @param config - The serialized parallel configuration to inspect
 * @returns true when `config.nodes` is an array (of node IDs)
 */
export function isParallelConfigWithNodes(
  config: SerializedParallel
): config is ParallelConfigWithNodes {
  // Structural Partial cast instead of `any` keeps the property access
  // type-checked against the narrowed interface.
  return Array.isArray((config as Partial<ParallelConfigWithNodes>).nodes)
}

View File

@@ -14,7 +14,6 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
selectedOutputs: string[],
responseFormat?: any
): ReadableStream {
// Check if this block has response format selected outputs
const hasResponseFormatSelection = selectedOutputs.some((outputId) => {
const blockIdForOutput = outputId.includes('_')
? outputId.split('_')[0]
@@ -22,12 +21,10 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
return blockIdForOutput === blockId && outputId.includes('_')
})
// If no response format selection, return original stream unchanged
if (!hasResponseFormatSelection || !responseFormat) {
return originalStream
}
// Get the selected field names for this block
const selectedFields = selectedOutputs
.filter((outputId) => {
const blockIdForOutput = outputId.includes('_')
@@ -53,7 +50,7 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
blockId: string
): ReadableStream {
let buffer = ''
let hasProcessedComplete = false // Track if we've already processed the complete JSON
let hasProcessedComplete = false
const self = this
@@ -67,7 +64,6 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
const { done, value } = await reader.read()
if (done) {
// Handle any remaining buffer at the end only if we haven't processed complete JSON yet
if (buffer.trim() && !hasProcessedComplete) {
self.processCompleteJson(buffer, selectedFields, controller)
}
@@ -78,13 +74,12 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
const chunk = decoder.decode(value, { stream: true })
buffer += chunk
// Try to process the current buffer only if we haven't processed complete JSON yet
if (!hasProcessedComplete) {
const processedChunk = self.processStreamingChunk(buffer, selectedFields)
if (processedChunk) {
controller.enqueue(new TextEncoder().encode(processedChunk))
hasProcessedComplete = true // Mark as processed to prevent duplicate processing
hasProcessedComplete = true
}
}
}
@@ -99,15 +94,9 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
}
private processStreamingChunk(buffer: string, selectedFields: string[]): string | null {
// For streaming response format, we need to parse the JSON as it comes in
// and extract only the field values we care about
// Try to parse as complete JSON first
try {
const parsed = JSON.parse(buffer.trim())
if (typeof parsed === 'object' && parsed !== null) {
// We have a complete JSON object, extract the selected fields
// Process all selected fields and format them properly
const results: string[] = []
for (const field of selectedFields) {
if (field in parsed) {
@@ -118,30 +107,21 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
}
if (results.length > 0) {
// Join multiple fields with newlines for readability
const result = results.join('\n')
return result
}
return null
}
} catch (e) {
// Not complete JSON yet, continue buffering
}
} catch (e) {}
// For real-time extraction during streaming, we'd need more sophisticated parsing
// For now, let's handle the case where we receive chunks that might be partial JSON
// Simple heuristic: if buffer contains what looks like a complete JSON object
const openBraces = (buffer.match(/\{/g) || []).length
const closeBraces = (buffer.match(/\}/g) || []).length
if (openBraces > 0 && openBraces === closeBraces) {
// Likely a complete JSON object
try {
const parsed = JSON.parse(buffer.trim())
if (typeof parsed === 'object' && parsed !== null) {
// Process all selected fields and format them properly
const results: string[] = []
for (const field of selectedFields) {
if (field in parsed) {
@@ -152,16 +132,13 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
}
if (results.length > 0) {
// Join multiple fields with newlines for readability
const result = results.join('\n')
return result
}
return null
}
} catch (e) {
// Still not valid JSON, continue
}
} catch (e) {}
}
return null
@@ -175,7 +152,6 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
try {
const parsed = JSON.parse(buffer.trim())
if (typeof parsed === 'object' && parsed !== null) {
// Process all selected fields and format them properly
const results: string[] = []
for (const field of selectedFields) {
if (field in parsed) {
@@ -186,7 +162,6 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
}
if (results.length > 0) {
// Join multiple fields with newlines for readability
const result = results.join('\n')
controller.enqueue(new TextEncoder().encode(result))
}
@@ -197,5 +172,4 @@ export class StreamingResponseFormatProcessor implements ResponseFormatStreamPro
}
}
// Create singleton instance
export const streamingResponseFormatProcessor = new StreamingResponseFormatProcessor()

View File

@@ -0,0 +1,25 @@
import type { ExecutionContext } from '@/executor/types'
export interface BlockDataCollection {
blockData: Record<string, any>
blockNameMapping: Record<string, string>
}
export function collectBlockData(context: ExecutionContext): BlockDataCollection {
const blockData: Record<string, any> = {}
const blockNameMapping: Record<string, string> = {}
for (const [id, state] of context.blockStates.entries()) {
if (state.output !== undefined) {
blockData[id] = state.output
const workflowBlock = context.workflow?.blocks?.find((b) => b.id === id)
if (workflowBlock?.metadata?.name) {
blockNameMapping[workflowBlock.metadata.name] = id
const normalized = workflowBlock.metadata.name.replace(/\s+/g, '').toLowerCase()
blockNameMapping[normalized] = id
}
}
}
return { blockData, blockNameMapping }
}

View File

@@ -0,0 +1,64 @@
import type { ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
/**
 * Inputs for wrapping a block-level failure into a single enriched Error.
 */
export interface BlockExecutionErrorDetails {
  block: SerializedBlock
  error: Error | string
  context?: ExecutionContext
  additionalInfo?: Record<string, any>
}

/**
 * Wraps a block failure in an Error whose message identifies the failing
 * block ("[type] name: message"), and attaches structured fields (block ID,
 * workflow ID, timestamp, extras) to the Error object for downstream loggers.
 *
 * @param details - The failing block, the underlying error, and optional context
 * @returns the enriched Error
 */
export function buildBlockExecutionError(details: BlockExecutionErrorDetails): Error {
  const { block, error: cause, context, additionalInfo } = details

  const message = cause instanceof Error ? cause.message : String(cause)
  // Fall back to the block ID / 'unknown' when metadata is absent
  const name = block.metadata?.name || block.id
  const type = block.metadata?.id || 'unknown'

  const wrapped = new Error(`[${type}] ${name}: ${message}`)
  Object.assign(wrapped, {
    blockId: block.id,
    blockName: name,
    blockType: type,
    workflowId: context?.workflowId,
    timestamp: new Date().toISOString(),
    ...additionalInfo,
  })
  return wrapped
}
/**
 * Build an Error describing a failed HTTP call. The message embeds the
 * method (or "request"), optional URL and status; the same fields plus a
 * timestamp are attached as properties for programmatic inspection.
 */
export function buildHTTPError(config: {
  status: number
  url?: string
  method?: string
  message?: string
}): Error {
  const parts: string[] = [config.message || `HTTP ${config.method || 'request'} failed`]
  if (config.url) {
    parts.push(` - ${config.url}`)
  }
  if (config.status) {
    parts.push(` (Status: ${config.status})`)
  }
  const error = new Error(parts.join(''))
  Object.assign(error, {
    status: config.status,
    url: config.url,
    method: config.method,
    timestamp: new Date().toISOString(),
  })
  return error
}
/** Convert any thrown value into a human-readable message string. */
export function normalizeError(error: unknown): string {
  return error instanceof Error ? error.message : String(error)
}

View File

@@ -0,0 +1,41 @@
import { generateInternalToken } from '@/lib/auth/internal'
import { getBaseUrl } from '@/lib/urls/utils'
import { HTTP } from '@/executor/consts'
/**
 * Build default JSON headers for internal API calls. On the server
 * (no `window` global) an internally-generated bearer token is attached.
 */
export async function buildAuthHeaders(): Promise<Record<string, string>> {
  const headers: Record<string, string> = { 'Content-Type': HTTP.CONTENT_TYPE.JSON }
  const isServer = typeof window === 'undefined'
  if (isServer) {
    headers.Authorization = `Bearer ${await generateInternalToken()}`
  }
  return headers
}
/**
 * Construct an absolute URL for an internal API path, appending only the
 * query parameters whose values are defined and non-null.
 */
export function buildAPIUrl(path: string, params?: Record<string, string>): URL {
  const url = new URL(path, getBaseUrl())
  for (const [key, value] of Object.entries(params ?? {})) {
    if (value !== undefined && value !== null) {
      url.searchParams.set(key, value)
    }
  }
  return url
}
/**
 * Pull a human-readable error message out of a failed API response.
 * Falls back to a generic status-based message when the body is not JSON
 * or carries no `error` field.
 */
export async function extractAPIErrorMessage(response: Response): Promise<string> {
  const fallback = `API request failed with status ${response.status}`
  try {
    const body = await response.json()
    return body.error || fallback
  } catch {
    return fallback
  }
}

View File

@@ -0,0 +1,45 @@
import { createLogger } from '@/lib/logs/console/logger'
import { EVALUATOR } from '@/executor/consts'
const logger = createLogger('JSONUtils')
/**
 * Parse a JSON string, returning `fallback` for non-string inputs or parse
 * failures. Failures are logged at debug level instead of being thrown.
 */
export function parseJSON<T>(value: unknown, fallback: T): T {
  if (typeof value !== 'string') return fallback
  try {
    return JSON.parse(value.trim())
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    logger.debug('Failed to parse JSON, using fallback', { error: message })
    return fallback
  }
}
/** Parse a JSON string or throw an Error whose message starts with "Invalid JSON:". */
export function parseJSONOrThrow(value: string): any {
  try {
    return JSON.parse(value.trim())
  } catch (error) {
    const reason = error instanceof Error ? error.message : 'Parse error'
    throw new Error(`Invalid JSON: ${reason}`)
  }
}
/**
 * Replace every single quote with a double quote so loosely quoted JSON-ish
 * text can be fed to JSON.parse. Note this is naive: apostrophes inside
 * string values are rewritten too.
 */
export function normalizeJSONString(value: string): string {
  return value.split("'").join('"')
}
/**
 * Stringify with the evaluator's default indentation; falls back to
 * String(value) for values JSON.stringify cannot handle (e.g. cycles).
 */
export function stringifyJSON(value: any, indent?: number): string {
  const spaces = indent ?? EVALUATOR.JSON_INDENT
  try {
    return JSON.stringify(value, null, spaces)
  } catch (error) {
    logger.warn('Failed to stringify value, returning string representation', { error })
    return String(value)
  }
}
/** Heuristic: does the trimmed string look like a JSON object or array? */
export function isJSONString(value: string): boolean {
  const first = value.trim().charAt(0)
  return first === '{' || first === '['
}

View File

@@ -0,0 +1,125 @@
import { describe, expect, it } from 'vitest'
import { StartBlockPath } from '@/lib/workflows/triggers'
import type { UserFile } from '@/executor/types'
import {
buildResolutionFromBlock,
buildStartBlockOutput,
resolveExecutorStartBlock,
} from '@/executor/utils/start-block'
import type { SerializedBlock } from '@/serializer/types'
/**
 * Build a minimal SerializedBlock fixture for start-block tests.
 * When `options.subBlocks` is given it is attached to metadata, and an
 * `inputFormat` sub-block (if present) is mirrored into config.params.
 */
function createBlock(
  type: string,
  id = type,
  options?: { subBlocks?: Record<string, unknown> }
): SerializedBlock {
  const subBlocks = options?.subBlocks
  const params: Record<string, unknown> = {}
  if (subBlocks?.inputFormat) {
    params.inputFormat = subBlocks.inputFormat
  }
  const metadata = {
    id: type,
    name: `block-${type}`,
    category: 'triggers',
    ...(subBlocks ? { subBlocks } : {}),
  } as SerializedBlock['metadata'] & { subBlocks?: Record<string, unknown> }
  return {
    id,
    position: { x: 0, y: 0 },
    config: { tool: type, params },
    inputs: {},
    outputs: {},
    metadata,
    enabled: true,
  }
}
describe('start-block utilities', () => {
  it('buildResolutionFromBlock returns null when metadata id missing', () => {
    const block = createBlock('api_trigger')
    // Simulate a block whose metadata lost its type id
    ;(block.metadata as Record<string, unknown>).id = undefined
    expect(buildResolutionFromBlock(block)).toBeNull()
  })
  it('resolveExecutorStartBlock prefers unified start block', () => {
    // A unified start_trigger should win over a legacy starter and an api_trigger
    const blocks = [
      createBlock('api_trigger', 'api'),
      createBlock('starter', 'starter'),
      createBlock('start_trigger', 'start'),
    ]
    const resolution = resolveExecutorStartBlock(blocks, {
      execution: 'api',
      isChildWorkflow: false,
    })
    expect(resolution?.blockId).toBe('start')
    expect(resolution?.path).toBe(StartBlockPath.UNIFIED)
  })
  it('buildStartBlockOutput normalizes unified start payload', () => {
    const block = createBlock('start_trigger', 'start')
    const resolution = {
      blockId: 'start',
      block,
      path: StartBlockPath.UNIFIED,
    } as const
    const output = buildStartBlockOutput({
      resolution,
      workflowInput: { payload: 'value' },
      isDeployedExecution: true,
    })
    // Unified output copies payload fields and guarantees input/conversationId keys
    expect(output.payload).toBe('value')
    expect(output.input).toBe('')
    expect(output.conversationId).toBe('')
  })
  it('buildStartBlockOutput uses trigger schema for API triggers', () => {
    // inputFormat declares name:string and count:number
    const apiBlock = createBlock('api_trigger', 'api', {
      subBlocks: {
        inputFormat: {
          value: [
            { name: 'name', type: 'string' },
            { name: 'count', type: 'number' },
          ],
        },
      },
    })
    const resolution = {
      blockId: 'api',
      block: apiBlock,
      path: StartBlockPath.SPLIT_API,
    } as const
    const files: UserFile[] = [
      {
        id: 'file-1',
        name: 'document.txt',
        url: 'https://example.com/document.txt',
        size: 42,
        type: 'text/plain',
        key: 'file-key',
        uploadedAt: new Date().toISOString(),
        expiresAt: new Date(Date.now() + 1000).toISOString(),
      },
    ]
    const output = buildStartBlockOutput({
      resolution,
      workflowInput: {
        input: {
          name: 'Ada',
          count: '5',
        },
        files,
      },
      isDeployedExecution: false,
    })
    // '5' is coerced to the declared number type; valid files pass through
    expect(output.name).toBe('Ada')
    expect(output.input).toEqual({ name: 'Ada', count: 5 })
    expect(output.files).toEqual(files)
  })
})

View File

@@ -0,0 +1,410 @@
import {
classifyStartBlockType,
getLegacyStarterMode,
resolveStartCandidates,
StartBlockPath,
} from '@/lib/workflows/triggers'
import type { NormalizedBlockOutput, UserFile } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
type ExecutionKind = 'chat' | 'manual' | 'api'
export interface ExecutorStartResolution {
  blockId: string
  block: SerializedBlock
  path: StartBlockPath
}

export interface ResolveExecutorStartOptions {
  execution: ExecutionKind
  isChildWorkflow: boolean
}

type StartCandidateWrapper = {
  type: string
  subBlocks?: Record<string, unknown>
  original: SerializedBlock
}

/**
 * Pick the block the executor should start from. Each serialized block is
 * wrapped and keyed by id, candidate ranking is delegated to
 * resolveStartCandidates, and the first candidate wins. A child workflow
 * with more than one trigger candidate is a configuration error.
 */
export function resolveExecutorStartBlock(
  blocks: SerializedBlock[],
  options: ResolveExecutorStartOptions
): ExecutorStartResolution | null {
  if (blocks.length === 0) return null

  const blockMap: Record<string, StartCandidateWrapper> = {}
  for (const block of blocks) {
    const type = block.metadata?.id
    if (!type) continue
    blockMap[block.id] = { type, subBlocks: extractSubBlocks(block), original: block }
  }

  const candidates = resolveStartCandidates(blockMap, {
    execution: options.execution,
    isChildWorkflow: options.isChildWorkflow,
  })
  if (candidates.length === 0) return null
  if (options.isChildWorkflow && candidates.length > 1) {
    throw new Error('Child workflow has multiple trigger blocks. Keep only one Start block.')
  }

  const winner = candidates[0]
  return { blockId: winner.blockId, block: winner.block.original, path: winner.path }
}
/**
 * Build a start resolution directly from a known block by classifying its
 * metadata id into a StartBlockPath. Returns null when the block has no
 * metadata id or its type is not a recognized start path.
 */
export function buildResolutionFromBlock(block: SerializedBlock): ExecutorStartResolution | null {
  const type = block.metadata?.id
  if (!type) return null
  const path = classifyStartBlockType(type)
  return path ? { blockId: block.id, block, path } : null
}
type InputFormatField = {
name?: string
type?: string | null
value?: unknown
}
/** Narrow to a non-null, non-array object (a "plain" record). */
function isPlainObject(value: unknown): value is Record<string, unknown> {
  return value !== null && typeof value === 'object' && !Array.isArray(value)
}
/**
 * Read `metadata.subBlocks[key].value` from a block, tolerating missing or
 * malformed metadata. Returns undefined when the sub-block entry is absent
 * or is not a plain object.
 */
function readMetadataSubBlockValue(block: SerializedBlock, key: string): unknown {
  const metadata = block.metadata as
    | (SerializedBlock['metadata'] & { subBlocks?: Record<string, unknown> })
    | undefined
  if (!metadata || typeof metadata !== 'object') {
    return undefined
  }
  const entry = metadata.subBlocks?.[key]
  if (!entry || typeof entry !== 'object' || Array.isArray(entry)) {
    return undefined
  }
  return (entry as { value?: unknown }).value
}
/**
 * Collect the configured input-format fields for a block.
 *
 * Prefers the value stored in `metadata.subBlocks.inputFormat` and falls
 * back to `config.params.inputFormat`. Returns an empty array when neither
 * source is an array; non-object entries are dropped.
 */
function extractInputFormat(block: SerializedBlock): InputFormatField[] {
  const fromMetadata = readMetadataSubBlockValue(block, 'inputFormat')
  const fromParams = block.config?.params?.inputFormat
  const source = fromMetadata ?? fromParams
  if (!Array.isArray(source)) {
    return []
  }
  // filter() already returns a fresh array; the previous identity .map() was redundant
  return source.filter((field): field is InputFormatField => isPlainObject(field))
}
/**
 * Coerce a raw input value to its declared field type. Values that cannot
 * be converted, unknown types, and null/undefined all pass through
 * unchanged.
 */
function coerceValue(type: string | null | undefined, value: unknown): unknown {
  if (value === undefined || value === null) return value

  if (type === 'string') {
    return typeof value === 'string' ? value : String(value)
  }
  if (type === 'number') {
    if (typeof value === 'number') return value
    const numeric = Number(value)
    return Number.isNaN(numeric) ? value : numeric
  }
  if (type === 'boolean') {
    if (typeof value === 'boolean') return value
    if (value === 'true' || value === '1' || value === 1) return true
    if (value === 'false' || value === '0' || value === 0) return false
    return value
  }
  if (type === 'object' || type === 'array') {
    if (typeof value !== 'string') return value
    try {
      return JSON.parse(value)
    } catch {
      return value
    }
  }
  return value
}
interface DerivedInputResult {
  structuredInput: Record<string, unknown>
  finalInput: unknown
  hasStructured: boolean
}

/**
 * Resolve the start block's input against its declared input format.
 *
 * For each named field, the value is looked up first in workflowInput.input,
 * then at the top level of workflowInput; in non-deployed (editor) runs a
 * missing value falls back to the field's configured default. Each value is
 * coerced to the declared type. With no format fields, the raw input passes
 * through unchanged.
 */
function deriveInputFromFormat(
  inputFormat: InputFormatField[],
  workflowInput: unknown,
  isDeployedExecution: boolean
): DerivedInputResult {
  if (inputFormat.length === 0) {
    return {
      structuredInput: {},
      finalInput: getRawInputCandidate(workflowInput),
      hasStructured: false,
    }
  }

  const structuredInput: Record<string, unknown> = {}
  const workflowRecord = isPlainObject(workflowInput) ? workflowInput : undefined
  for (const field of inputFormat) {
    const fieldName = field.name?.trim()
    if (!fieldName) continue

    let fieldValue: unknown
    if (workflowRecord) {
      const inner = workflowRecord.input
      if (isPlainObject(inner) && Object.hasOwn(inner, fieldName)) {
        fieldValue = inner[fieldName]
      } else if (Object.hasOwn(workflowRecord, fieldName)) {
        fieldValue = workflowRecord[fieldName]
      }
    }
    // Editor runs may fall back to the configured default; deployed runs never do.
    if (fieldValue == null && !isDeployedExecution) {
      fieldValue = field.value
    }
    structuredInput[fieldName] = coerceValue(field.type, fieldValue)
  }

  const hasStructured = Object.keys(structuredInput).length > 0
  return {
    structuredInput,
    finalInput: hasStructured ? structuredInput : getRawInputCandidate(workflowInput),
    hasStructured,
  }
}
/** Unwrap `workflowInput.input` when present; otherwise return the input as-is. */
function getRawInputCandidate(workflowInput: unknown): unknown {
  return isPlainObject(workflowInput) && Object.hasOwn(workflowInput, 'input')
    ? workflowInput.input
    : workflowInput
}
/** Structural check that a value carries all required UserFile fields. */
function isUserFile(candidate: unknown): candidate is UserFile {
  if (!isPlainObject(candidate)) {
    return false
  }
  const stringFields = ['id', 'name', 'url', 'type'] as const
  return (
    stringFields.every((field) => typeof candidate[field] === 'string') &&
    typeof candidate.size === 'number'
  )
}
/** Extract `files` from the workflow input when every entry is a valid UserFile. */
function getFilesFromWorkflowInput(workflowInput: unknown): UserFile[] | undefined {
  if (!isPlainObject(workflowInput)) {
    return undefined
  }
  const files = workflowInput.files
  return Array.isArray(files) && files.every(isUserFile) ? files : undefined
}
/** Attach validated input files to the output (mutates and returns `output`). */
function mergeFilesIntoOutput(
  output: NormalizedBlockOutput,
  workflowInput: unknown
): NormalizedBlockOutput {
  const files = getFilesFromWorkflowInput(workflowInput)
  if (files !== undefined) {
    output.files = files
  }
  return output
}
/** Return the value when it is a string, otherwise an empty string. */
function ensureString(value: unknown): string {
  if (typeof value === 'string') return value
  return ''
}
/**
 * Output for the unified Start block: copy every workflow-input field
 * (except the internal onUploadError callback), guarantee `input` and
 * `conversationId` keys, then merge any validated uploaded files.
 */
function buildUnifiedStartOutput(workflowInput: unknown): NormalizedBlockOutput {
  const output: NormalizedBlockOutput = {}
  if (isPlainObject(workflowInput)) {
    for (const key of Object.keys(workflowInput)) {
      if (key === 'onUploadError') continue
      output[key] = workflowInput[key]
    }
  }
  if (!Object.hasOwn(output, 'input')) output.input = ''
  if (!Object.hasOwn(output, 'conversationId')) output.conversationId = ''
  return mergeFilesIntoOutput(output, workflowInput)
}
function buildApiOrInputOutput(finalInput: unknown, workflowInput: unknown): NormalizedBlockOutput {
const isObjectInput = isPlainObject(finalInput)
const output: NormalizedBlockOutput = isObjectInput
? {
...(finalInput as Record<string, unknown>),
input: { ...(finalInput as Record<string, unknown>) },
}
: { input: finalInput }
return mergeFilesIntoOutput(output, workflowInput)
}
/** Output for chat triggers: string `input` and `conversationId`, plus files. */
function buildChatOutput(workflowInput: unknown): NormalizedBlockOutput {
  const record = isPlainObject(workflowInput) ? workflowInput : undefined
  return mergeFilesIntoOutput(
    {
      input: ensureString(record?.input),
      conversationId: ensureString(record?.conversationId),
    },
    workflowInput
  )
}
/**
 * Output for the legacy starter block. Chat mode reuses the chat shape;
 * otherwise object inputs are spread top-level with a copy under `input`,
 * and a conversationId present on the workflow input is carried through.
 */
function buildLegacyStarterOutput(
  finalInput: unknown,
  workflowInput: unknown,
  mode: 'manual' | 'api' | 'chat' | null
): NormalizedBlockOutput {
  if (mode === 'chat') {
    return buildChatOutput(workflowInput)
  }
  let output: NormalizedBlockOutput
  if (isPlainObject(finalInput)) {
    output = { ...finalInput, input: { ...finalInput } }
  } else {
    output = { input: finalInput }
  }
  const conversationId = isPlainObject(workflowInput) ? workflowInput.conversationId : undefined
  if (conversationId !== undefined) {
    output.conversationId = ensureString(conversationId)
  }
  return mergeFilesIntoOutput(output, workflowInput)
}
function buildManualTriggerOutput(
finalInput: unknown,
workflowInput: unknown
): NormalizedBlockOutput {
const finalObject = isPlainObject(finalInput) ? finalInput : undefined
const output: NormalizedBlockOutput = finalObject
? { ...(finalObject as Record<string, unknown>) }
: { input: finalInput }
if (!Object.hasOwn(output, 'input')) {
output.input = getRawInputCandidate(workflowInput)
}
return mergeFilesIntoOutput(output, workflowInput)
}
/**
 * Read the optional `subBlocks` record off block metadata, returning
 * undefined when metadata is missing or subBlocks is not a plain object.
 */
function extractSubBlocks(block: SerializedBlock): Record<string, unknown> | undefined {
  const metadata = block.metadata as
    | (SerializedBlock['metadata'] & { subBlocks?: Record<string, unknown> })
    | undefined
  if (!metadata || typeof metadata !== 'object') {
    return undefined
  }
  const subBlocks = metadata.subBlocks
  if (subBlocks && typeof subBlocks === 'object' && !Array.isArray(subBlocks)) {
    return subBlocks
  }
  return undefined
}
export interface StartBlockOutputOptions {
  resolution: ExecutorStartResolution
  workflowInput: unknown
  isDeployedExecution: boolean
}

/**
 * Normalize the workflow input into the start block's output shape,
 * dispatching on which start path was resolved (unified, split API/input,
 * chat, manual, or legacy starter). Unknown paths fall back to the manual
 * shape.
 */
export function buildStartBlockOutput(options: StartBlockOutputOptions): NormalizedBlockOutput {
  const { resolution, workflowInput, isDeployedExecution } = options
  const inputFormat = extractInputFormat(resolution.block)
  const { finalInput } = deriveInputFromFormat(inputFormat, workflowInput, isDeployedExecution)

  if (resolution.path === StartBlockPath.UNIFIED) {
    return buildUnifiedStartOutput(workflowInput)
  }
  if (resolution.path === StartBlockPath.SPLIT_API || resolution.path === StartBlockPath.SPLIT_INPUT) {
    return buildApiOrInputOutput(finalInput, workflowInput)
  }
  if (resolution.path === StartBlockPath.SPLIT_CHAT) {
    return buildChatOutput(workflowInput)
  }
  if (resolution.path === StartBlockPath.LEGACY_STARTER) {
    return buildLegacyStarterOutput(
      finalInput,
      workflowInput,
      getLegacyStarterMode({ subBlocks: extractSubBlocks(resolution.block) })
    )
  }
  // SPLIT_MANUAL and any future path use the manual shape.
  return buildManualTriggerOutput(finalInput, workflowInput)
}

View File

@@ -0,0 +1,124 @@
import { createLogger } from '@/lib/logs/console/logger'
import { LOOP, PARALLEL, PARSING, REFERENCE } from '@/executor/consts'
import type { SerializedParallel } from '@/serializer/types'
const logger = createLogger('SubflowUtils')
const BRANCH_PATTERN = new RegExp(`${PARALLEL.BRANCH.PREFIX}\\d+${PARALLEL.BRANCH.SUFFIX}$`)
const BRANCH_INDEX_PATTERN = new RegExp(`${PARALLEL.BRANCH.PREFIX}(\\d+)${PARALLEL.BRANCH.SUFFIX}$`)
const SENTINEL_START_PATTERN = new RegExp(
`${LOOP.SENTINEL.PREFIX}(.+)${LOOP.SENTINEL.START_SUFFIX}`
)
const SENTINEL_END_PATTERN = new RegExp(`${LOOP.SENTINEL.PREFIX}(.+)${LOOP.SENTINEL.END_SUFFIX}`)
/**
* ==================
* LOOP UTILITIES
* ==================
*/
/** Sentinel node ID marking a loop's entry: `<prefix><loopId><startSuffix>`. */
export function buildSentinelStartId(loopId: string): string {
  return [LOOP.SENTINEL.PREFIX, loopId, LOOP.SENTINEL.START_SUFFIX].join('')
}
/** Sentinel node ID marking a loop's exit: `<prefix><loopId><endSuffix>`. */
export function buildSentinelEndId(loopId: string): string {
  return [LOOP.SENTINEL.PREFIX, loopId, LOOP.SENTINEL.END_SUFFIX].join('')
}
/** True when the node ID carries a loop sentinel start or end suffix. */
export function isSentinelNodeId(nodeId: string): boolean {
  if (nodeId.includes(LOOP.SENTINEL.START_SUFFIX)) return true
  return nodeId.includes(LOOP.SENTINEL.END_SUFFIX)
}
/** Recover the loop ID embedded in a sentinel node ID, or null for non-sentinels. */
export function extractLoopIdFromSentinel(sentinelId: string): string | null {
  const match =
    sentinelId.match(SENTINEL_START_PATTERN) ?? sentinelId.match(SENTINEL_END_PATTERN)
  return match ? match[1] : null
}
/**
* ==================
* PARALLEL UTILITIES
* ==================
*/
/**
 * Parse a parallel block's distribution into an array of items.
 * Unresolved `<...>` references yield [] (they are substituted later by the
 * variable resolver); JSON strings (single quotes tolerated) are parsed;
 * arrays pass through; a lone object becomes a one-item array.
 */
export function parseDistributionItems(config: SerializedParallel): any[] {
  const raw = config.distribution ?? []
  if (typeof raw === 'string') {
    if (raw.startsWith(REFERENCE.START)) {
      return []
    }
    try {
      return JSON.parse(raw.replace(/'/g, '"'))
    } catch (error) {
      logger.error('Failed to parse distribution items', {
        rawItems: raw,
        error: error instanceof Error ? error.message : String(error),
      })
      return []
    }
  }
  if (Array.isArray(raw)) {
    return raw
  }
  if (typeof raw === 'object' && raw !== null) {
    return [raw]
  }
  return []
}
/**
 * Number of parallel branches: a collection-typed parallel with items uses
 * the item count; otherwise the configured count (or the default).
 */
export function calculateBranchCount(config: SerializedParallel, distributionItems: any[]): number {
  if (config.parallelType === PARALLEL.TYPE.COLLECTION && distributionItems.length > 0) {
    return distributionItems.length
  }
  return config.count ?? PARALLEL.DEFAULT_COUNT
}
/**
 * Branch node ID with subscript notation.
 * Example: ("blockId", 2) → "blockId₍2₎"
 */
export function buildBranchNodeId(baseId: string, branchIndex: number): string {
  return [baseId, PARALLEL.BRANCH.PREFIX, branchIndex, PARALLEL.BRANCH.SUFFIX].join('')
}
export function extractBaseBlockId(branchNodeId: string): string {
return branchNodeId.replace(BRANCH_PATTERN, '')
}
/** Parse the numeric branch index out of a branch node ID, or null. */
export function extractBranchIndex(branchNodeId: string): number | null {
  const match = branchNodeId.match(BRANCH_INDEX_PATTERN)
  if (!match) return null
  return Number.parseInt(match[1], PARSING.JSON_RADIX)
}
/** True when the node ID ends with a parallel-branch subscript. */
export function isBranchNodeId(nodeId: string): boolean {
  return BRANCH_PATTERN.test(nodeId)
}
/** True for loop-related nodes: sentinels, or anything using the sentinel prefix. */
export function isLoopNode(nodeId: string): boolean {
  if (isSentinelNodeId(nodeId)) return true
  return nodeId.startsWith(LOOP.SENTINEL.PREFIX)
}
/** True for parallel-branch nodes (ID carries a branch subscript). */
export function isParallelNode(nodeId: string): boolean {
  return isBranchNodeId(nodeId)
}
/**
 * Map any execution-graph node ID back to its workflow block/loop ID:
 * branch nodes lose their subscript, sentinels resolve to their loop ID,
 * and everything else passes through unchanged.
 */
export function normalizeNodeId(nodeId: string): string {
  if (isBranchNodeId(nodeId)) return extractBaseBlockId(nodeId)
  if (isSentinelNodeId(nodeId)) return extractLoopIdFromSentinel(nodeId) || nodeId
  return nodeId
}

View File

@@ -1,54 +0,0 @@
/**
 * Utility functions for managing virtual block IDs in parallel execution.
 * Virtual blocks allow the same block to be executed multiple times with different contexts.
 */
export class VirtualBlockUtils {
  /** Build the virtual ID `originalId_parallel_parallelId_iteration_N`. */
  static generateParallelId(originalId: string, parallelId: string, iteration: number): string {
    return `${originalId}_parallel_${parallelId}_iteration_${iteration}`
  }

  /** Recover the original block ID; non-virtual IDs pass through unchanged. */
  static extractOriginalId(virtualOrOriginalId: string): string {
    if (!VirtualBlockUtils.isVirtualId(virtualOrOriginalId)) {
      return virtualOrOriginalId
    }
    // Everything before the first `_parallel_` marker is the original ID.
    const [original] = virtualOrOriginalId.split('_parallel_')
    return original || virtualOrOriginalId
  }

  /** An ID is virtual when it carries both marker segments. */
  static isVirtualId(id: string): boolean {
    return id.includes('_parallel_') && id.includes('_iteration_')
  }

  /**
   * Decompose a virtual ID into its components; null when the ID is not
   * virtual or does not match the expected layout.
   */
  static parseVirtualId(
    virtualId: string
  ): { originalId: string; parallelId: string; iteration: number } | null {
    if (!VirtualBlockUtils.isVirtualId(virtualId)) {
      return null
    }
    const match = virtualId.match(/^(.+)_parallel_(.+)_iteration_(\d+)$/)
    if (!match) {
      return null
    }
    const [, originalId, parallelId, iteration] = match
    return { originalId, parallelId, iteration: Number.parseInt(iteration, 10) }
  }
}

View File

@@ -0,0 +1,319 @@
import { createLogger } from '@/lib/logs/console/logger'
import { BlockType, REFERENCE } from '@/executor/consts'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
import type { ExecutionState, LoopScope } from '../execution/state'
import { BlockResolver } from './resolvers/block'
import { EnvResolver } from './resolvers/env'
import { LoopResolver } from './resolvers/loop'
import { ParallelResolver } from './resolvers/parallel'
import type { ResolutionContext, Resolver } from './resolvers/reference'
import { WorkflowResolver } from './resolvers/workflow'
const logger = createLogger('VariableResolver')
// Characters that cannot appear in a valid reference path segment; their
// presence means the angle-bracketed text is an expression (e.g. `<a + b>`),
// not a block reference.
const INVALID_REFERENCE_CHARS = /[+*/=<>!]/

/**
 * Heuristic filter deciding whether an angle-bracket-delimited segment looks
 * like a `<block.field>` reference rather than an incidental comparison such
 * as `x < y` that happened to match the reference regex.
 */
function isLikelyReferenceSegment(segment: string): boolean {
  if (!segment.startsWith(REFERENCE.START) || !segment.endsWith(REFERENCE.END)) {
    return false
  }
  const inner = segment.slice(1, -1)
  // Starts with space - not a reference
  if (inner.startsWith(' ')) {
    return false
  }
  // Contains only comparison operators or has operators with spaces
  if (inner.match(/^\s*[<>=!]+\s*$/) || inner.match(/\s[<>=!]+\s/)) {
    return false
  }
  // Starts with comparison operator followed by space
  if (inner.match(/^[<>=!]+\s/)) {
    return false
  }
  // For dotted references (like <block.field>)
  if (inner.includes('.')) {
    const dotIndex = inner.indexOf('.')
    const beforeDot = inner.substring(0, dotIndex)
    const afterDot = inner.substring(dotIndex + 1)
    // No spaces after dot
    if (afterDot.includes(' ')) {
      return false
    }
    // No invalid chars in either part
    if (INVALID_REFERENCE_CHARS.test(beforeDot) || INVALID_REFERENCE_CHARS.test(afterDot)) {
      return false
    }
  } else if (INVALID_REFERENCE_CHARS.test(inner) || inner.match(/^\d/) || inner.match(/\s\d/)) {
    // No invalid chars, doesn't start with digit, no space before digit
    return false
  }
  return true
}
/**
 * Resolves `<...>` references and `{{ENV_VAR}}` placeholders inside block
 * params by delegating to a chain of scoped resolvers.
 */
export class VariableResolver {
  private resolvers: Resolver[]
  private blockResolver: BlockResolver

  /**
   * Wire up the resolver chain. Order matters: loop and parallel scope
   * resolvers run before workflow variables and env vars, with the block
   * resolver acting as the catch-all for plain `<block.field>` references.
   */
  constructor(
    private workflow: SerializedWorkflow,
    private workflowVariables: Record<string, any>,
    private state: ExecutionState
  ) {
    this.blockResolver = new BlockResolver(workflow)
    this.resolvers = [
      new LoopResolver(workflow),
      new ParallelResolver(workflow),
      new WorkflowResolver(workflowVariables),
      new EnvResolver(),
      this.blockResolver,
    ]
  }

  /**
   * Resolve every reference inside a block's params.
   * Condition blocks get special handling: their `conditions` JSON string is
   * parsed and each condition's value is resolved without condition-specific
   * quoting (the values are evaluated as JS later).
   */
  resolveInputs(
    ctx: ExecutionContext,
    currentNodeId: string,
    params: Record<string, any>,
    block?: SerializedBlock
  ): Record<string, any> {
    if (!params) {
      return {}
    }
    const resolved: Record<string, any> = {}
    const isConditionBlock = block?.metadata?.id === BlockType.CONDITION
    if (isConditionBlock && typeof params.conditions === 'string') {
      try {
        const parsed = JSON.parse(params.conditions)
        if (Array.isArray(parsed)) {
          resolved.conditions = parsed.map((cond: any) => ({
            ...cond,
            value:
              typeof cond.value === 'string'
                ? this.resolveTemplateWithoutConditionFormatting(ctx, currentNodeId, cond.value)
                : cond.value,
          }))
        } else {
          // Parsed but not an array — treat like any other value
          resolved.conditions = this.resolveValue(
            ctx,
            currentNodeId,
            params.conditions,
            undefined,
            block
          )
        }
      } catch (parseError) {
        logger.warn('Failed to parse conditions JSON, falling back to normal resolution', {
          error: parseError,
          conditions: params.conditions,
        })
        resolved.conditions = this.resolveValue(
          ctx,
          currentNodeId,
          params.conditions,
          undefined,
          block
        )
      }
    }
    for (const [key, value] of Object.entries(params)) {
      // conditions were already handled above for condition blocks
      if (isConditionBlock && key === 'conditions') {
        continue
      }
      resolved[key] = this.resolveValue(ctx, currentNodeId, value, undefined, block)
    }
    return resolved
  }

  /** Resolve a single reference string (used by loop/parallel orchestration). */
  resolveSingleReference(
    ctx: ExecutionContext,
    currentNodeId: string,
    reference: string,
    loopScope?: LoopScope
  ): any {
    return this.resolveValue(ctx, currentNodeId, reference, loopScope)
  }

  /**
   * Recursively resolve a value: arrays and objects are walked element by
   * element, strings go through template resolution, and everything else
   * passes through untouched.
   */
  private resolveValue(
    ctx: ExecutionContext,
    currentNodeId: string,
    value: any,
    loopScope?: LoopScope,
    block?: SerializedBlock
  ): any {
    if (value === null || value === undefined) {
      return value
    }
    if (Array.isArray(value)) {
      return value.map((v) => this.resolveValue(ctx, currentNodeId, v, loopScope, block))
    }
    if (typeof value === 'object') {
      return Object.entries(value).reduce(
        (acc, [key, val]) => ({
          ...acc,
          [key]: this.resolveValue(ctx, currentNodeId, val, loopScope, block),
        }),
        {}
      )
    }
    if (typeof value === 'string') {
      return this.resolveTemplate(ctx, currentNodeId, value, loopScope, block)
    }
    return value
  }

  /**
   * Replace `<...>` references and env-var placeholders inside a template
   * string. The first resolver error aborts further replacement and is
   * rethrown after the scan, so partially substituted strings are never
   * returned. Unresolvable segments are left verbatim.
   */
  private resolveTemplate(
    ctx: ExecutionContext,
    currentNodeId: string,
    template: string,
    loopScope?: LoopScope,
    block?: SerializedBlock
  ): string {
    let result = template
    const resolutionContext: ResolutionContext = {
      executionContext: ctx,
      executionState: this.state,
      currentNodeId,
      loopScope,
    }
    const referenceRegex = new RegExp(
      `${REFERENCE.START}([^${REFERENCE.END}]+)${REFERENCE.END}`,
      'g'
    )
    let replacementError: Error | null = null
    result = result.replace(referenceRegex, (match) => {
      if (replacementError) return match
      if (!isLikelyReferenceSegment(match)) {
        return match
      }
      try {
        const resolved = this.resolveReference(match, resolutionContext)
        if (resolved === undefined) {
          return match
        }
        const blockType = block?.metadata?.id
        // Heuristic: a function block whose code contains `${`, `}` and a
        // backtick is treated as interpolating inside a template literal.
        const isInTemplateLiteral =
          blockType === BlockType.FUNCTION &&
          template.includes('${') &&
          template.includes('}') &&
          template.includes('`')
        return this.blockResolver.formatValueForBlock(resolved, blockType, isInTemplateLiteral)
      } catch (error) {
        replacementError = error instanceof Error ? error : new Error(String(error))
        return match
      }
    })
    if (replacementError !== null) {
      throw replacementError
    }
    const envRegex = new RegExp(`${REFERENCE.ENV_VAR_START}([^}]+)${REFERENCE.ENV_VAR_END}`, 'g')
    result = result.replace(envRegex, (match) => {
      const resolved = this.resolveReference(match, resolutionContext)
      return typeof resolved === 'string' ? resolved : match
    })
    return result
  }

  /**
   * Resolves template string but without condition-specific formatting.
   * Used when resolving condition values that are already parsed from JSON.
   */
  private resolveTemplateWithoutConditionFormatting(
    ctx: ExecutionContext,
    currentNodeId: string,
    template: string,
    loopScope?: LoopScope
  ): string {
    let result = template
    const resolutionContext: ResolutionContext = {
      executionContext: ctx,
      executionState: this.state,
      currentNodeId,
      loopScope,
    }
    const referenceRegex = new RegExp(
      `${REFERENCE.START}([^${REFERENCE.END}]+)${REFERENCE.END}`,
      'g'
    )
    let replacementError: Error | null = null
    result = result.replace(referenceRegex, (match) => {
      if (replacementError) return match
      if (!isLikelyReferenceSegment(match)) {
        return match
      }
      try {
        const resolved = this.resolveReference(match, resolutionContext)
        if (resolved === undefined) {
          return match
        }
        // Format value for JavaScript evaluation
        // Strings need to be quoted, objects need JSON.stringify
        if (typeof resolved === 'string') {
          // Escape backslashes first, then single quotes, then wrap in single quotes
          const escaped = resolved.replace(/\\/g, '\\\\').replace(/'/g, "\\'")
          return `'${escaped}'`
        }
        if (typeof resolved === 'object' && resolved !== null) {
          return JSON.stringify(resolved)
        }
        // For numbers, booleans, null, undefined - use as-is
        return String(resolved)
      } catch (error) {
        replacementError = error instanceof Error ? error : new Error(String(error))
        return match
      }
    })
    if (replacementError !== null) {
      throw replacementError
    }
    const envRegex = new RegExp(`${REFERENCE.ENV_VAR_START}([^}]+)${REFERENCE.ENV_VAR_END}`, 'g')
    result = result.replace(envRegex, (match) => {
      const resolved = this.resolveReference(match, resolutionContext)
      return typeof resolved === 'string' ? resolved : match
    })
    return result
  }

  /** Dispatch a reference to the first resolver in the chain that claims it. */
  private resolveReference(reference: string, context: ResolutionContext): any {
    for (const resolver of this.resolvers) {
      if (resolver.canResolve(reference)) {
        const result = resolver.resolve(reference, context)
        logger.debug('Reference resolved', {
          reference,
          resolver: resolver.constructor.name,
          result,
        })
        return result
      }
    }
    logger.warn('No resolver found for reference', { reference })
    return undefined
  }
}

View File

@@ -0,0 +1,257 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isReference, parseReferencePath, SPECIAL_REFERENCE_PREFIXES } from '@/executor/consts'
import type { SerializedWorkflow } from '@/serializer/types'
import { normalizeBlockName } from '@/stores/workflows/utils'
import type { ResolutionContext, Resolver } from './reference'
const logger = createLogger('BlockResolver')
export class BlockResolver implements Resolver {
private blockByNormalizedName: Map<string, string>
  /**
   * Index every block by its id and by its normalized display name so
   * references can use either form.
   */
  constructor(private workflow: SerializedWorkflow) {
    this.blockByNormalizedName = new Map()
    for (const block of workflow.blocks) {
      // ids map to themselves so id-based references resolve via the same table
      this.blockByNormalizedName.set(block.id, block.id)
      if (block.metadata?.name) {
        const normalized = normalizeBlockName(block.metadata.name)
        this.blockByNormalizedName.set(normalized, block.id)
      }
    }
  }
  /**
   * This resolver handles any reference whose first path segment is not
   * claimed by a special prefix (those belong to loop/parallel/env/etc.
   * resolvers).
   */
  canResolve(reference: string): boolean {
    if (!isReference(reference)) {
      return false
    }
    const parts = parseReferencePath(reference)
    if (parts.length === 0) {
      return false
    }
    const [type] = parts
    return !SPECIAL_REFERENCE_PREFIXES.includes(type as any)
  }
  /**
   * Resolve `<blockName.path.to.field>` against the referenced block's
   * output. Throws when the block is unknown, has no recorded output, or
   * the path does not exist (listing the fields that are available).
   */
  resolve(reference: string, context: ResolutionContext): any {
    const parts = parseReferencePath(reference)
    if (parts.length === 0) {
      return undefined
    }
    const [blockName, ...pathParts] = parts
    logger.debug('Resolving block reference', {
      reference,
      blockName,
      pathParts,
    })
    const blockId = this.findBlockIdByName(blockName)
    if (!blockId) {
      logger.error('Block not found by name', { blockName, reference })
      throw new Error(`Block "${blockName}" not found`)
    }
    const output = this.getBlockOutput(blockId, context)
    logger.debug('Block output retrieved', {
      blockName,
      blockId,
      hasOutput: !!output,
      outputKeys: output ? Object.keys(output) : [],
    })
    if (!output) {
      throw new Error(`No state found for block "${blockName}"`)
    }
    // A bare `<blockName>` reference returns the whole output object
    if (pathParts.length === 0) {
      return output
    }
    const result = this.navigatePath(output, pathParts)
    if (result === undefined) {
      const availableKeys = output && typeof output === 'object' ? Object.keys(output) : []
      throw new Error(
        `No value found at path "${pathParts.join('.')}" in block "${blockName}". Available fields: ${availableKeys.join(', ')}`
      )
    }
    logger.debug('Navigated path result', {
      blockName,
      pathParts,
      result,
    })
    return result
  }
  /**
   * Read a block's output, preferring live execution state and falling back
   * to the snapshot stored on the execution context.
   */
  private getBlockOutput(blockId: string, context: ResolutionContext): any {
    const stateOutput = context.executionState.getBlockOutput(blockId)
    if (stateOutput !== undefined) {
      return stateOutput
    }
    const contextState = context.executionContext.blockStates?.get(blockId)
    if (contextState?.output) {
      return contextState.output
    }
    return undefined
  }
  /** Look up a block id by the raw name first, then by its normalized form. */
  private findBlockIdByName(name: string): string | undefined {
    if (this.blockByNormalizedName.has(name)) {
      return this.blockByNormalizedName.get(name)
    }
    const normalized = normalizeBlockName(name)
    return this.blockByNormalizedName.get(normalized)
  }
/**
 * Walks `path` segment by segment into `obj`.
 * Supports three segment forms: indexed properties ("items[0]"), bare
 * numeric array indices ("0"), and plain property names.
 * Returns undefined as soon as the walk hits null/undefined.
 */
private navigatePath(obj: any, path: string[]): any {
  // Hoisted so the regexes are not rebuilt on every segment; .test() is
  // used because the capture groups were never read.
  const indexedSegment = /^[^[]+\[\d+\]/
  const numericSegment = /^\d+$/
  let current = obj
  for (const part of path) {
    if (current === null || current === undefined) {
      return undefined
    }
    if (indexedSegment.test(part)) {
      // e.g. "items[0][1]" — property access plus one or more indices.
      current = this.resolvePartWithIndices(current, part, '', 'block')
    } else if (numericSegment.test(part)) {
      // Bare number: index into an array, undefined for non-arrays.
      const index = Number.parseInt(part, 10)
      current = Array.isArray(current) ? current[index] : undefined
    } else {
      current = current[part]
    }
  }
  return current
}
/**
 * Resolves a single path segment that carries bracket indices, e.g.
 * "items[0]" or "matrix[1][2]": reads the leading property (if present)
 * off `base`, then applies each [n] index in order.
 *
 * `fullPath` and `sourceName` are used only in error messages; the sole
 * visible caller (navigatePath) passes '' and 'block', so messages may
 * render an empty path — NOTE(review): consider threading the real path
 * through for better diagnostics.
 *
 * Throws when the leading property is missing, an index is applied to a
 * non-array, or an index is out of bounds.
 */
private resolvePartWithIndices(
base: any,
part: string,
fullPath: string,
sourceName: string
): any {
let value = base
// Leading property name before the first '[' (absent when the segment
// starts directly with an index).
const propMatch = part.match(/^([^[]+)/)
let rest = part
if (propMatch) {
const prop = propMatch[1]
value = value[prop]
rest = part.slice(prop.length)
if (value === undefined) {
throw new Error(`No value found at path "${fullPath}" in block "${sourceName}".`)
}
}
// Consume "[n]" groups one at a time from the remaining text.
const indexRe = /^\[(\d+)\]/
while (rest.length > 0) {
const m = rest.match(indexRe)
if (!m) {
throw new Error(`Invalid path "${part}" in "${fullPath}" for block "${sourceName}".`)
}
const idx = Number.parseInt(m[1], 10)
if (!Array.isArray(value)) {
throw new Error(`Invalid path "${part}" in "${fullPath}" for block "${sourceName}".`)
}
if (idx < 0 || idx >= value.length) {
throw new Error(
`Array index ${idx} out of bounds (length: ${value.length}) in path "${part}"`
)
}
value = value[idx]
rest = rest.slice(m[0].length)
}
return value
}
/**
 * Formats a resolved value for insertion into a target block's input,
 * dispatching on block type:
 * - 'condition': condition-safe literal (escaped, quoted strings).
 * - 'function': code-context literal; raw interpolation when inside a
 *   template literal, JSON otherwise.
 * - 'response': strings are JSON-quoted; NOTE(review): non-string values
 *   are returned as-is despite the declared `string` return type —
 *   presumably the response path expects raw objects; confirm callers.
 * - default: objects are JSON-encoded, primitives stringified.
 */
formatValueForBlock(
value: any,
blockType: string | undefined,
isInTemplateLiteral = false
): string {
if (blockType === 'condition') {
return this.stringifyForCondition(value)
}
if (blockType === 'function') {
return this.formatValueForCodeContext(value, isInTemplateLiteral)
}
if (blockType === 'response') {
if (typeof value === 'string') {
return JSON.stringify(value)
}
// Pass-through: objects/primitives are returned untouched here.
return value
}
if (typeof value === 'object' && value !== null) {
return JSON.stringify(value)
}
return String(value)
}
/**
 * Serializes a value for inline use inside a condition expression.
 * Strings are escaped (backslash, quote, newline, carriage return) and
 * double-quoted; null/undefined become their keywords; objects are
 * JSON-encoded; everything else is stringified.
 */
private stringifyForCondition(value: any): string {
  if (typeof value === 'string') {
    const escaped = value
      .replace(/\\/g, '\\\\')
      .replace(/"/g, '\\"')
      .replace(/\n/g, '\\n')
      .replace(/\r/g, '\\r')
    return `"${escaped}"`
  }
  if (value === null) {
    return 'null'
  }
  if (value === undefined) {
    return 'undefined'
  }
  return typeof value === 'object' ? JSON.stringify(value) : String(value)
}
/**
 * Formats a value for insertion into function-block code.
 * Inside a template literal, strings interpolate raw and objects become
 * JSON. Outside one, the result must be a valid standalone JS literal:
 * strings and objects are JSON-encoded, null/undefined become keywords,
 * and other primitives are stringified.
 */
private formatValueForCodeContext(value: any, isInTemplateLiteral: boolean): string {
  const isPlainObject = typeof value === 'object' && value !== null
  if (isInTemplateLiteral) {
    if (typeof value === 'string') {
      return value
    }
    return isPlainObject ? JSON.stringify(value) : String(value)
  }
  if (typeof value === 'string' || isPlainObject) {
    return JSON.stringify(value)
  }
  if (value === undefined) {
    return 'undefined'
  }
  if (value === null) {
    return 'null'
  }
  return String(value)
}
/**
 * Best-effort JSON parse: strings that look like a JSON object or array
 * are parsed; anything else (including unparseable strings) is returned
 * unchanged.
 */
tryParseJSON(value: any): any {
  if (typeof value !== 'string') {
    return value
  }
  const candidate = value.trim()
  const looksLikeJson =
    candidate.length > 0 && (candidate.startsWith('{') || candidate.startsWith('['))
  if (!looksLikeJson) {
    return value
  }
  try {
    return JSON.parse(candidate)
  } catch {
    return value
  }
}
}

View File

@@ -0,0 +1,22 @@
import { createLogger } from '@/lib/logs/console/logger'
import { extractEnvVarName, isEnvVarReference } from '@/executor/consts'
import type { ResolutionContext, Resolver } from './reference'
const logger = createLogger('EnvResolver')
/**
 * Resolves environment-variable references against the execution
 * context's environment. Unknown variables resolve to the original
 * reference text so templates degrade gracefully instead of injecting
 * `undefined`.
 */
export class EnvResolver implements Resolver {
  canResolve(reference: string): boolean {
    return isEnvVarReference(reference)
  }

  resolve(reference: string, context: ResolutionContext): any {
    const varName = extractEnvVarName(reference)
    const resolved = context.executionContext.environmentVariables?.[varName]
    if (resolved !== undefined) {
      return resolved
    }
    logger.debug('Environment variable not found, returning original reference', { varName })
    return reference
  }
}

View File

@@ -0,0 +1,73 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isReference, parseReferencePath, REFERENCE } from '@/executor/consts'
import { extractBaseBlockId } from '@/executor/utils/subflow-utils'
import type { SerializedWorkflow } from '@/serializer/types'
import type { ResolutionContext, Resolver } from './reference'
const logger = createLogger('LoopResolver')
export class LoopResolver implements Resolver {
constructor(private workflow: SerializedWorkflow) {}
canResolve(reference: string): boolean {
if (!isReference(reference)) {
return false
}
const parts = parseReferencePath(reference)
if (parts.length === 0) {
return false
}
const [type] = parts
return type === REFERENCE.PREFIX.LOOP
}
resolve(reference: string, context: ResolutionContext): any {
const parts = parseReferencePath(reference)
if (parts.length < 2) {
logger.warn('Invalid loop reference - missing property', { reference })
return undefined
}
const [_, property] = parts
let loopScope = context.loopScope
if (!loopScope) {
const loopId = this.findLoopForBlock(context.currentNodeId)
if (!loopId) {
logger.debug('Block not in a loop', { nodeId: context.currentNodeId })
return undefined
}
loopScope = context.executionState.getLoopScope(loopId)
}
if (!loopScope) {
logger.warn('Loop scope not found', { reference })
return undefined
}
switch (property) {
case 'iteration':
case 'index':
return loopScope.iteration
case 'item':
case 'currentItem':
return loopScope.item
case 'items':
return loopScope.items
default:
logger.warn('Unknown loop property', { property })
return undefined
}
}
private findLoopForBlock(blockId: string): string | undefined {
const baseId = extractBaseBlockId(blockId)
for (const loopId of Object.keys(this.workflow.loops || {})) {
const loopConfig = this.workflow.loops[loopId]
if (loopConfig.nodes.includes(baseId)) {
return loopId
}
}
return undefined
}
}

View File

@@ -0,0 +1,100 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isReference, parseReferencePath, REFERENCE } from '@/executor/consts'
import { extractBaseBlockId, extractBranchIndex } from '@/executor/utils/subflow-utils'
import type { SerializedWorkflow } from '@/serializer/types'
import type { ResolutionContext, Resolver } from './reference'
const logger = createLogger('ParallelResolver')
export class ParallelResolver implements Resolver {
constructor(private workflow: SerializedWorkflow) {}
canResolve(reference: string): boolean {
if (!isReference(reference)) {
return false
}
const parts = parseReferencePath(reference)
if (parts.length === 0) {
return false
}
const [type] = parts
return type === REFERENCE.PREFIX.PARALLEL
}
resolve(reference: string, context: ResolutionContext): any {
const parts = parseReferencePath(reference)
if (parts.length < 2) {
logger.warn('Invalid parallel reference - missing property', { reference })
return undefined
}
const [_, property] = parts
const parallelId = this.findParallelForBlock(context.currentNodeId)
if (!parallelId) {
logger.debug('Block not in a parallel', { nodeId: context.currentNodeId })
return undefined
}
const parallelConfig = this.workflow.parallels?.[parallelId]
if (!parallelConfig) {
logger.warn('Parallel config not found', { parallelId })
return undefined
}
const branchIndex = extractBranchIndex(context.currentNodeId)
if (branchIndex === null) {
logger.debug('Node ID does not have branch index', { nodeId: context.currentNodeId })
return undefined
}
const distributionItems = this.getDistributionItems(parallelConfig)
switch (property) {
case 'index':
return branchIndex
case 'currentItem':
if (Array.isArray(distributionItems)) {
return distributionItems[branchIndex]
}
if (typeof distributionItems === 'object' && distributionItems !== null) {
const keys = Object.keys(distributionItems)
const key = keys[branchIndex]
return key !== undefined ? distributionItems[key] : undefined
}
return undefined
case 'items':
return distributionItems
default:
logger.warn('Unknown parallel property', { property })
return undefined
}
}
private findParallelForBlock(blockId: string): string | undefined {
const baseId = extractBaseBlockId(blockId)
if (!this.workflow.parallels) {
return undefined
}
for (const parallelId of Object.keys(this.workflow.parallels)) {
const parallelConfig = this.workflow.parallels[parallelId]
if (parallelConfig?.nodes.includes(baseId)) {
return parallelId
}
}
return undefined
}
private getDistributionItems(parallelConfig: any): any {
let distributionItems = parallelConfig.distributionItems || parallelConfig.distribution || []
if (typeof distributionItems === 'string' && !distributionItems.startsWith('<')) {
try {
distributionItems = JSON.parse(distributionItems.replace(/'/g, '"'))
} catch (e) {
logger.error('Failed to parse distribution items', { distributionItems })
return []
}
}
return distributionItems
}
}

View File

@@ -0,0 +1,13 @@
import type { ExecutionContext } from '@/executor/types'
import type { ExecutionState, LoopScope } from '../../execution/state'
/**
 * Everything a resolver needs to evaluate a reference at execution time.
 */
export interface ResolutionContext {
// Full execution context (environment variables, workflow variables,
// legacy blockStates map).
executionContext: ExecutionContext
// Live executor state (block outputs, loop scopes).
executionState: ExecutionState
// Id of the node whose inputs are being resolved; presumably may carry
// subflow/branch suffixes — resolvers strip them via extractBaseBlockId.
currentNodeId: string
// Explicit loop scope, when the caller already knows it; resolvers fall
// back to deriving it from loop membership otherwise.
loopScope?: LoopScope
}
/**
 * Strategy interface: each resolver claims a reference shape via
 * canResolve and evaluates it via resolve.
 */
export interface Resolver {
// Returns true when this resolver owns the given reference syntax.
canResolve(reference: string): boolean
// Evaluates the reference; may return undefined when unresolvable.
resolve(reference: string, context: ResolutionContext): any
}

View File

@@ -0,0 +1,49 @@
import { createLogger } from '@/lib/logs/console/logger'
import { isReference, parseReferencePath, REFERENCE } from '@/executor/consts'
import type { ResolutionContext, Resolver } from './reference'
const logger = createLogger('WorkflowResolver')
/**
 * Resolves workflow-variable references by name or id. Variables provided
 * on the execution context take precedence over the set captured at
 * construction time.
 */
export class WorkflowResolver implements Resolver {
  constructor(private workflowVariables: Record<string, any>) {}

  canResolve(reference: string): boolean {
    if (!isReference(reference)) {
      return false
    }
    const parts = parseReferencePath(reference)
    if (parts.length === 0) {
      return false
    }
    return parts[0] === REFERENCE.PREFIX.VARIABLE
  }

  resolve(reference: string, context: ResolutionContext): any {
    const parts = parseReferencePath(reference)
    if (parts.length < 2) {
      logger.warn('Invalid variable reference - missing variable name', { reference })
      return undefined
    }
    const [, variableName] = parts

    // Search runtime variables first, then the statically captured set.
    for (const source of [context.executionContext.workflowVariables, this.workflowVariables]) {
      if (!source) {
        continue
      }
      const found = this.findVariable(source, variableName)
      if (found) {
        return found.value
      }
    }
    logger.debug('Workflow variable not found', { variableName })
    return undefined
  }

  /** Finds a variable object whose name or id matches, or undefined. */
  private findVariable(source: Record<string, any>, variableName: string): any {
    for (const candidate of Object.values(source)) {
      const v = candidate as any
      if (v.name === variableName || v.id === variableName) {
        return v
      }
    }
    return undefined
  }
}