Merge branch 'staging' into feat/run-from-block-2

This commit is contained in:
Vikhyath Mondreti
2026-01-28 12:46:23 -08:00
53 changed files with 1695 additions and 1600 deletions

File diff suppressed because one or more lines are too long

View File

@@ -35,8 +35,7 @@ const AutoLayoutRequestSchema = z.object({
})
.optional()
.default({}),
// Optional: if provided, use these blocks instead of loading from DB
// This allows using blocks with live measurements from the UI
gridSize: z.number().min(0).max(50).optional(),
blocks: z.record(z.any()).optional(),
edges: z.array(z.any()).optional(),
loops: z.record(z.any()).optional(),
@@ -53,7 +52,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
const { id: workflowId } = await params
try {
// Get the session
const session = await getSession()
if (!session?.user?.id) {
logger.warn(`[${requestId}] Unauthorized autolayout attempt for workflow ${workflowId}`)
@@ -62,7 +60,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
const userId = session.user.id
// Parse request body
const body = await request.json()
const layoutOptions = AutoLayoutRequestSchema.parse(body)
@@ -70,7 +67,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
userId,
})
// Fetch the workflow to check ownership/access
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow
@@ -79,7 +75,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
}
// Check if user has permission to update this workflow
const canUpdate =
accessContext?.isOwner ||
(workflowData.workspaceId
@@ -94,8 +89,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}
// Use provided blocks/edges if available (with live measurements from UI),
// otherwise load from database
let currentWorkflowData: NormalizedWorkflowData | null
if (layoutOptions.blocks && layoutOptions.edges) {
@@ -125,6 +118,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
y: layoutOptions.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
},
alignment: layoutOptions.alignment,
gridSize: layoutOptions.gridSize,
}
const layoutResult = applyAutoLayout(

View File

@@ -1,108 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { generateRequestId } from '@/lib/core/utils/request'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import {
DEFAULT_HORIZONTAL_SPACING,
DEFAULT_LAYOUT_PADDING,
DEFAULT_VERTICAL_SPACING,
} from '@/lib/workflows/autolayout/constants'
/** Logger scoped to this YAML auto-layout API route. */
const logger = createLogger('YamlAutoLayoutAPI')

// Request schema: the caller supplies a full workflow state plus optional
// layout tuning. Loops/parallels default to empty records when omitted.
// Blocks/edges are intentionally loose (z.any()) — the layout engine, not
// this route, owns their shape.
const AutoLayoutRequestSchema = z.object({
  workflowState: z.object({
    blocks: z.record(z.any()),
    edges: z.array(z.any()),
    loops: z.record(z.any()).optional().default({}),
    parallels: z.record(z.any()).optional().default({}),
  }),
  options: z
    .object({
      // Gaps between blocks; server-side defaults apply when omitted.
      spacing: z
        .object({
          horizontal: z.number().optional(),
          vertical: z.number().optional(),
        })
        .optional(),
      alignment: z.enum(['start', 'center', 'end']).optional(),
      // Offset of the laid-out graph from the canvas origin.
      padding: z
        .object({
          x: z.number().optional(),
          y: z.number().optional(),
        })
        .optional(),
    })
    .optional(),
})
/**
 * POST handler: runs the auto-layout engine over a caller-provided workflow
 * state and returns the same state with repositioned blocks.
 *
 * Responses:
 * - 200 `{ success: true, workflowState }` on success.
 * - 500 `{ success: false, errors: [...] }` when layout or validation fails.
 */
export async function POST(request: NextRequest) {
  const requestId = generateRequestId()
  try {
    const payload = await request.json()
    const { workflowState, options } = AutoLayoutRequestSchema.parse(payload)

    logger.info(`[${requestId}] Applying auto layout`, {
      blockCount: Object.keys(workflowState.blocks).length,
      edgeCount: workflowState.edges.length,
    })

    // Fill any omitted tuning values with the shared layout defaults.
    const spacing = options?.spacing
    const pad = options?.padding
    const layoutResult = applyAutoLayout(workflowState.blocks, workflowState.edges, {
      horizontalSpacing: spacing?.horizontal ?? DEFAULT_HORIZONTAL_SPACING,
      verticalSpacing: spacing?.vertical ?? DEFAULT_VERTICAL_SPACING,
      padding: {
        x: pad?.x ?? DEFAULT_LAYOUT_PADDING.x,
        y: pad?.y ?? DEFAULT_LAYOUT_PADDING.y,
      },
      alignment: options?.alignment ?? 'center',
    })

    if (!layoutResult.success || !layoutResult.blocks) {
      logger.error(`[${requestId}] Auto layout failed:`, {
        error: layoutResult.error,
      })
      return NextResponse.json(
        {
          success: false,
          errors: [layoutResult.error || 'Unknown auto layout error'],
        },
        { status: 500 }
      )
    }

    logger.info(`[${requestId}] Auto layout completed successfully:`, {
      success: true,
      blockCount: Object.keys(layoutResult.blocks).length,
    })

    // Echo the input state back with only block positions replaced.
    return NextResponse.json({
      success: true,
      workflowState: {
        blocks: layoutResult.blocks,
        edges: workflowState.edges,
        loops: workflowState.loops || {},
        parallels: workflowState.parallels || {},
      },
    })
  } catch (error) {
    // Covers JSON parse failures, zod validation errors, and engine throws.
    logger.error(`[${requestId}] Auto layout failed:`, error)
    return NextResponse.json(
      {
        success: false,
        errors: [error instanceof Error ? error.message : 'Unknown auto layout error'],
      },
      { status: 500 }
    )
  }
}

View File

@@ -3,6 +3,7 @@ import { createLogger } from '@sim/logger'
import { useReactFlow } from 'reactflow'
import type { AutoLayoutOptions } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/auto-layout-utils'
import { applyAutoLayoutAndUpdateStore as applyAutoLayoutStandalone } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/auto-layout-utils'
import { useSnapToGridSize } from '@/hooks/queries/general-settings'
import { useCanvasViewport } from '@/hooks/use-canvas-viewport'
export type { AutoLayoutOptions }
@@ -13,21 +14,28 @@ const logger = createLogger('useAutoLayout')
* Hook providing auto-layout functionality for workflows.
* Binds workflowId context and provides memoized callback for React components.
* Includes automatic fitView animation after successful layout.
* Automatically uses the user's snap-to-grid setting for grid-aligned layout.
*
* Note: This hook requires a ReactFlowProvider ancestor.
*/
export function useAutoLayout(workflowId: string | null) {
const reactFlowInstance = useReactFlow()
const { fitViewToBounds } = useCanvasViewport(reactFlowInstance)
const snapToGridSize = useSnapToGridSize()
const applyAutoLayoutAndUpdateStore = useCallback(
async (options: AutoLayoutOptions = {}) => {
if (!workflowId) {
return { success: false, error: 'No workflow ID provided' }
}
return applyAutoLayoutStandalone(workflowId, options)
// Include gridSize from user's snap-to-grid setting
const optionsWithGrid: AutoLayoutOptions = {
...options,
gridSize: options.gridSize ?? (snapToGridSize > 0 ? snapToGridSize : undefined),
}
return applyAutoLayoutStandalone(workflowId, optionsWithGrid)
},
[workflowId]
[workflowId, snapToGridSize]
)
/**

View File

@@ -21,6 +21,7 @@ export interface AutoLayoutOptions {
x?: number
y?: number
}
gridSize?: number
}
/**
@@ -62,6 +63,7 @@ export async function applyAutoLayoutAndUpdateStore(
x: options.padding?.x ?? DEFAULT_LAYOUT_PADDING.x,
y: options.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
},
gridSize: options.gridSize,
}
// Call the autolayout API route

View File

@@ -2352,33 +2352,12 @@ const WorkflowContent = React.memo(() => {
window.removeEventListener('remove-from-subflow', handleRemoveFromSubflow as EventListener)
}, [blocks, edgesForDisplay, getNodeAbsolutePosition, collaborativeBatchUpdateParent])
/** Handles node changes - applies changes and resolves parent-child selection conflicts. */
const onNodesChange = useCallback(
(changes: NodeChange[]) => {
selectedIdsRef.current = null
setDisplayNodes((nds) => {
const updated = applyNodeChanges(changes, nds)
const hasSelectionChange = changes.some((c) => c.type === 'select')
if (!hasSelectionChange) return updated
const resolved = resolveParentChildSelectionConflicts(updated, blocks)
selectedIdsRef.current = resolved.filter((node) => node.selected).map((node) => node.id)
return resolved
})
const selectedIds = selectedIdsRef.current as string[] | null
if (selectedIds !== null) {
syncPanelWithSelection(selectedIds)
}
},
[blocks]
)
/**
* Updates container dimensions in displayNodes during drag.
* This allows live resizing of containers as their children are dragged.
* Updates container dimensions in displayNodes during drag or keyboard movement.
*/
const updateContainerDimensionsDuringDrag = useCallback(
(draggedNodeId: string, draggedNodePosition: { x: number; y: number }) => {
const parentId = blocks[draggedNodeId]?.data?.parentId
const updateContainerDimensionsDuringMove = useCallback(
(movedNodeId: string, movedNodePosition: { x: number; y: number }) => {
const parentId = blocks[movedNodeId]?.data?.parentId
if (!parentId) return
setDisplayNodes((currentNodes) => {
@@ -2386,7 +2365,7 @@ const WorkflowContent = React.memo(() => {
if (childNodes.length === 0) return currentNodes
const childPositions = childNodes.map((node) => {
const nodePosition = node.id === draggedNodeId ? draggedNodePosition : node.position
const nodePosition = node.id === movedNodeId ? movedNodePosition : node.position
const { width, height } = getBlockDimensions(node.id)
return { x: nodePosition.x, y: nodePosition.y, width, height }
})
@@ -2417,6 +2396,55 @@ const WorkflowContent = React.memo(() => {
[blocks, getBlockDimensions]
)
/** Handles node changes - applies changes and resolves parent-child selection conflicts. */
const onNodesChange = useCallback(
(changes: NodeChange[]) => {
selectedIdsRef.current = null
setDisplayNodes((nds) => {
const updated = applyNodeChanges(changes, nds)
const hasSelectionChange = changes.some((c) => c.type === 'select')
if (!hasSelectionChange) return updated
const resolved = resolveParentChildSelectionConflicts(updated, blocks)
selectedIdsRef.current = resolved.filter((node) => node.selected).map((node) => node.id)
return resolved
})
const selectedIds = selectedIdsRef.current as string[] | null
if (selectedIds !== null) {
syncPanelWithSelection(selectedIds)
}
// Handle position changes (e.g., from keyboard arrow key movement)
// Update container dimensions when child nodes are moved and persist to backend
// Only persist if not in a drag operation (drag-end is handled by onNodeDragStop)
const isInDragOperation =
getDragStartPosition() !== null || multiNodeDragStartRef.current.size > 0
const keyboardPositionUpdates: Array<{ id: string; position: { x: number; y: number } }> = []
for (const change of changes) {
if (
change.type === 'position' &&
!change.dragging &&
'position' in change &&
change.position
) {
updateContainerDimensionsDuringMove(change.id, change.position)
if (!isInDragOperation) {
keyboardPositionUpdates.push({ id: change.id, position: change.position })
}
}
}
// Persist keyboard movements to backend for collaboration sync
if (keyboardPositionUpdates.length > 0) {
collaborativeBatchUpdatePositions(keyboardPositionUpdates)
}
},
[
blocks,
updateContainerDimensionsDuringMove,
collaborativeBatchUpdatePositions,
getDragStartPosition,
]
)
/**
* Effect to resize loops when nodes change (add/remove/position change).
* Runs on structural changes only - not during drag (position-only changes).
@@ -2661,7 +2689,7 @@ const WorkflowContent = React.memo(() => {
// If the node is inside a container, update container dimensions during drag
if (currentParentId) {
updateContainerDimensionsDuringDrag(node.id, node.position)
updateContainerDimensionsDuringMove(node.id, node.position)
}
// Check if this is a starter block - starter blocks should never be in containers
@@ -2778,7 +2806,7 @@ const WorkflowContent = React.memo(() => {
blocks,
getNodeAbsolutePosition,
getNodeDepth,
updateContainerDimensionsDuringDrag,
updateContainerDimensionsDuringMove,
highlightContainerNode,
]
)

View File

@@ -1,241 +0,0 @@
/**
* Search utility functions for tiered matching algorithm
* Provides predictable search results prioritizing exact matches over fuzzy matches
*/
/** Minimal shape an item must expose to be searchable. */
export interface SearchableItem {
  id: string
  name: string
  description?: string
  type: string
  // Alternate names that should also match this item (e.g. abbreviations).
  aliases?: string[]
  // Items may carry arbitrary extra fields; the search only reads the above.
  [key: string]: any
}

/** A scored search hit: the matched item plus how well and why it matched. */
export interface SearchResult<T extends SearchableItem> {
  item: T
  score: number
  // Which tier produced the winning score, in descending order of strength.
  matchType: 'exact' | 'prefix' | 'alias' | 'word-boundary' | 'substring' | 'description'
}
// Tier scores, in descending order of match strength.
const SCORE_EXACT_MATCH = 10000
const SCORE_PREFIX_MATCH = 5000
const SCORE_ALIAS_MATCH = 3000
const SCORE_WORD_BOUNDARY = 1000
const SCORE_SUBSTRING_MATCH = 100
// Description matches are discounted relative to name matches.
const DESCRIPTION_WEIGHT = 0.3

/**
 * Score how well `query` matches a single text field.
 * Tiers (highest first): exact, prefix, word-boundary, substring.
 * Comparison is case-insensitive on trimmed strings.
 * Returns { score: 0, matchType: null } when nothing matches.
 */
function calculateFieldScore(
  query: string,
  field: string
): {
  score: number
  matchType: 'exact' | 'prefix' | 'word-boundary' | 'substring' | null
} {
  const q = query.toLowerCase().trim()
  const f = field.toLowerCase().trim()
  if (!q || !f) {
    return { score: 0, matchType: null }
  }
  if (f === q) {
    return { score: SCORE_EXACT_MATCH, matchType: 'exact' }
  }
  if (f.startsWith(q)) {
    return { score: SCORE_PREFIX_MATCH, matchType: 'prefix' }
  }
  // Word boundary: the query matches the start of any word in the field.
  if (f.split(/[\s-_/]+/).some((word) => word.startsWith(q))) {
    return { score: SCORE_WORD_BOUNDARY, matchType: 'word-boundary' }
  }
  if (f.includes(q)) {
    return { score: SCORE_SUBSTRING_MATCH, matchType: 'substring' }
  }
  return { score: 0, matchType: null }
}

/**
 * Score a query against an item's aliases.
 * An exact alias hit scores full SCORE_ALIAS_MATCH, a prefix hit 80% of it,
 * and containment in either direction 60%. The first matching alias wins.
 */
function calculateAliasScore(
  query: string,
  aliases?: string[]
): { score: number; matchType: 'alias' | null } {
  if (!aliases?.length) {
    return { score: 0, matchType: null }
  }
  const q = query.toLowerCase().trim()
  for (const rawAlias of aliases) {
    const alias = rawAlias.toLowerCase().trim()
    if (alias === q) {
      return { score: SCORE_ALIAS_MATCH, matchType: 'alias' }
    }
    if (alias.startsWith(q)) {
      return { score: SCORE_ALIAS_MATCH * 0.8, matchType: 'alias' }
    }
    if (q.includes(alias) || alias.includes(q)) {
      return { score: SCORE_ALIAS_MATCH * 0.6, matchType: 'alias' }
    }
  }
  return { score: 0, matchType: null }
}

/**
 * Score a multi-word query: every query word must appear in the field, either
 * at a word boundary or as a substring. The result is the average of the
 * per-word tier scores; any missing word disqualifies the whole query.
 */
function calculateMultiWordScore(
  queryWords: string[],
  field: string
): { score: number; matchType: 'word-boundary' | 'substring' | null } {
  const normalized = field.toLowerCase().trim()
  const fieldWords = normalized.split(/[\s\-_/:]+/)
  let sum = 0
  let sawBoundary = false
  for (const word of queryWords) {
    if (fieldWords.some((fw) => fw.startsWith(word))) {
      sum += SCORE_WORD_BOUNDARY
      sawBoundary = true
    } else if (normalized.includes(word)) {
      sum += SCORE_SUBSTRING_MATCH
    } else {
      // One unmatched word disqualifies the entire multi-word query.
      return { score: 0, matchType: null }
    }
  }
  return {
    score: sum / queryWords.length,
    matchType: sawBoundary ? 'word-boundary' : 'substring',
  }
}
/**
 * Search items using the tiered matching algorithm.
 * Scores each item on name, description (discounted by DESCRIPTION_WEIGHT),
 * and aliases; multi-word queries also try per-word matching and keep the
 * better score. Returns matching items sorted by score, highest first.
 */
export function searchItems<T extends SearchableItem>(
  query: string,
  items: T[]
): SearchResult<T>[] {
  const trimmed = query.trim()
  if (!trimmed) {
    return []
  }

  const queryWords = trimmed.toLowerCase().split(/\s+/).filter(Boolean)
  const multiWord = queryWords.length > 1
  const matches: SearchResult<T>[] = []

  for (const candidate of items) {
    const nameResult = calculateFieldScore(trimmed, candidate.name)
    let nameScore = nameResult.score
    let nameType = nameResult.matchType

    const aliasScore = calculateAliasScore(trimmed, candidate.aliases).score

    let descScore = candidate.description
      ? calculateFieldScore(trimmed, candidate.description).score * DESCRIPTION_WEIGHT
      : 0

    // Multi-word queries: match each word independently, keep the better score.
    if (multiWord) {
      const mwName = calculateMultiWordScore(queryWords, candidate.name)
      if (mwName.score > nameScore) {
        nameScore = mwName.score
        nameType = mwName.matchType
      }
      if (candidate.description) {
        const mwDesc =
          calculateMultiWordScore(queryWords, candidate.description).score * DESCRIPTION_WEIGHT
        if (mwDesc > descScore) {
          descScore = mwDesc
        }
      }
    }

    const best = Math.max(nameScore, descScore, aliasScore)
    if (best <= 0) {
      continue
    }

    // Attribute the win: name beats alias beats description on ties.
    let matchType: SearchResult<T>['matchType']
    if (nameScore >= descScore && nameScore >= aliasScore) {
      matchType = nameType || 'substring'
    } else if (aliasScore >= descScore) {
      matchType = 'alias'
    } else {
      matchType = 'description'
    }

    matches.push({ item: candidate, score: best, matchType })
  }

  matches.sort((a, b) => b.score - a.score)
  return matches
}
/**
 * Map a search result's match type to a human-readable label.
 * Unknown values fall back to the generic 'Match'.
 */
export function getMatchTypeLabel(matchType: SearchResult<any>['matchType']): string {
  const labels: Record<string, string> = {
    exact: 'Exact match',
    prefix: 'Starts with',
    alias: 'Similar to',
    'word-boundary': 'Word match',
    substring: 'Contains',
    description: 'In description',
  }
  return labels[matchType] ?? 'Match'
}

View File

@@ -176,7 +176,7 @@ function FormattedInput({
onChange,
onScroll,
}: FormattedInputProps) {
const handleScroll = (e: React.UIEvent<HTMLInputElement>) => {
const handleScroll = (e: { currentTarget: HTMLInputElement }) => {
onScroll(e.currentTarget.scrollLeft)
}

View File

@@ -73,7 +73,12 @@ export const Sidebar = memo(function Sidebar() {
const { data: sessionData, isPending: sessionLoading } = useSession()
const { canEdit } = useUserPermissionsContext()
const { config: permissionConfig } = usePermissionConfig()
const { config: permissionConfig, filterBlocks } = usePermissionConfig()
const initializeSearchData = useSearchModalStore((state) => state.initializeData)
useEffect(() => {
initializeSearchData(filterBlocks)
}, [initializeSearchData, filterBlocks])
/**
* Sidebar state from store with hydration tracking to prevent SSR mismatch.

File diff suppressed because one or more lines are too long

View File

@@ -207,6 +207,7 @@ export class EdgeConstructor {
for (const connection of workflow.connections) {
let { source, target } = connection
const originalSource = source
const originalTarget = target
let sourceHandle = this.generateSourceHandle(
source,
target,
@@ -257,14 +258,14 @@ export class EdgeConstructor {
target = sentinelStartId
}
if (loopSentinelStartId) {
this.addEdge(dag, loopSentinelStartId, target, EDGE.LOOP_EXIT, targetHandle)
}
if (this.edgeCrossesLoopBoundary(source, target, blocksInLoops, dag)) {
continue
}
if (loopSentinelStartId && !blocksInLoops.has(originalTarget)) {
this.addEdge(dag, loopSentinelStartId, target, EDGE.LOOP_EXIT, targetHandle)
}
if (!this.isEdgeReachable(source, target, reachableBlocks, dag)) {
continue
}

View File

@@ -28,6 +28,7 @@ import type {
} from '@/executor/types'
import { streamingResponseFormatProcessor } from '@/executor/utils'
import { buildBlockExecutionError, normalizeError } from '@/executor/utils/errors'
import { isJSONString } from '@/executor/utils/json'
import { filterOutputForLog } from '@/executor/utils/output-filter'
import { validateBlockType } from '@/executor/utils/permission-check'
import type { VariableResolver } from '@/executor/variables/resolver'
@@ -86,7 +87,7 @@ export class BlockExecutor {
resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)
if (blockLog) {
blockLog.input = resolvedInputs
blockLog.input = this.parseJsonInputs(resolvedInputs)
}
} catch (error) {
cleanupSelfReference?.()
@@ -157,7 +158,14 @@ export class BlockExecutor {
const displayOutput = filterOutputForLog(block.metadata?.id || '', normalizedOutput, {
block,
})
this.callOnBlockComplete(ctx, node, block, resolvedInputs, displayOutput, duration)
this.callOnBlockComplete(
ctx,
node,
block,
this.parseJsonInputs(resolvedInputs),
displayOutput,
duration
)
}
return normalizedOutput
@@ -233,7 +241,7 @@ export class BlockExecutor {
blockLog.durationMs = duration
blockLog.success = false
blockLog.error = errorMessage
blockLog.input = input
blockLog.input = this.parseJsonInputs(input)
blockLog.output = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
}
@@ -248,7 +256,14 @@ export class BlockExecutor {
if (!isSentinel) {
const displayOutput = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
this.callOnBlockComplete(ctx, node, block, input, displayOutput, duration)
this.callOnBlockComplete(
ctx,
node,
block,
this.parseJsonInputs(input),
displayOutput,
duration
)
}
const hasErrorPort = this.hasErrorPortEdge(node)
@@ -336,6 +351,36 @@ export class BlockExecutor {
return { result: output }
}
/**
 * Parse JSON string inputs to objects for log display only.
 * Any string value that looks like JSON is parsed in place on a shallow
 * copy; values that fail to parse keep their original string form.
 * Never mutates the `inputs` argument.
 */
private parseJsonInputs(inputs: Record<string, any>): Record<string, any> {
  let display = inputs
  let copied = false
  for (const [key, value] of Object.entries(inputs)) {
    // isJSONString is a cheap heuristic (leading { or [), not a validator —
    // invalid JSON is caught below; the check just avoids JSON.parse on
    // every plain string.
    const looksLikeJson = typeof value === 'string' && isJSONString(value)
    if (!looksLikeJson) {
      continue
    }
    try {
      if (!copied) {
        display = { ...inputs }
        copied = true
      }
      display[key] = JSON.parse(value.trim())
    } catch {
      // Not valid JSON — keep the original string.
    }
  }
  return display
}
private callOnBlockStart(ctx: ExecutionContext, node: DAGNode, block: SerializedBlock): void {
const blockId = node.id
const blockName = block.metadata?.name ?? blockId

View File

@@ -77,15 +77,16 @@ export class EdgeManager {
}
}
// Check if any deactivation targets that previously received an activated edge are now ready
for (const { target } of edgesToDeactivate) {
if (
!readyNodes.includes(target) &&
!activatedTargets.includes(target) &&
this.nodesWithActivatedEdge.has(target) &&
this.isTargetReady(target)
) {
readyNodes.push(target)
if (output.selectedRoute !== EDGE.LOOP_EXIT && output.selectedRoute !== EDGE.PARALLEL_EXIT) {
for (const { target } of edgesToDeactivate) {
if (
!readyNodes.includes(target) &&
!activatedTargets.includes(target) &&
this.nodesWithActivatedEdge.has(target) &&
this.isTargetReady(target)
) {
readyNodes.push(target)
}
}
}

View File

@@ -412,6 +412,12 @@ export class ExecutionEngine {
logger.info('Processing outgoing edges', {
nodeId,
outgoingEdgesCount: node.outgoingEdges.size,
outgoingEdges: Array.from(node.outgoingEdges.entries()).map(([id, e]) => ({
id,
target: e.target,
sourceHandle: e.sourceHandle,
})),
output,
readyNodesCount: readyNodes.length,
readyNodes,
})

View File

@@ -27,6 +27,8 @@ export interface ParallelScope {
items?: any[]
/** Error message if parallel validation failed (e.g., exceeded max branches) */
validationError?: string
/** Whether the parallel has an empty distribution and should be skipped */
isEmpty?: boolean
}
export class ExecutionState implements BlockStateController {

View File

@@ -936,8 +936,12 @@ export class AgentBlockHandler implements BlockHandler {
systemPrompt: validMessages ? undefined : inputs.systemPrompt,
context: validMessages ? undefined : stringifyJSON(messages),
tools: formattedTools,
temperature: inputs.temperature,
maxTokens: inputs.maxTokens,
temperature:
inputs.temperature != null && inputs.temperature !== ''
? Number(inputs.temperature)
: undefined,
maxTokens:
inputs.maxTokens != null && inputs.maxTokens !== '' ? Number(inputs.maxTokens) : undefined,
apiKey: inputs.apiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,

View File

@@ -14,8 +14,8 @@ export interface AgentInputs {
slidingWindowSize?: string // For message-based sliding window
slidingWindowTokens?: string // For token-based sliding window
// LLM parameters
temperature?: number
maxTokens?: number
temperature?: string
maxTokens?: string
apiKey?: string
azureEndpoint?: string
azureApiVersion?: string

View File

@@ -395,10 +395,10 @@ export class LoopOrchestrator {
return true
}
// forEach: skip if items array is empty
if (scope.loopType === 'forEach') {
if (!scope.items || scope.items.length === 0) {
logger.info('ForEach loop has empty items, skipping loop body', { loopId })
logger.info('ForEach loop has empty collection, skipping loop body', { loopId })
this.state.setBlockOutput(loopId, { results: [] }, DEFAULTS.EXECUTION_TIME)
return false
}
return true
@@ -408,6 +408,8 @@ export class LoopOrchestrator {
if (scope.loopType === 'for') {
if (scope.maxIterations === 0) {
logger.info('For loop has 0 iterations, skipping loop body', { loopId })
// Set empty output for the loop
this.state.setBlockOutput(loopId, { results: [] }, DEFAULTS.EXECUTION_TIME)
return false
}
return true

View File

@@ -108,7 +108,7 @@ export class NodeExecutionOrchestrator {
if (loopId) {
const shouldExecute = await this.loopOrchestrator.evaluateInitialCondition(ctx, loopId)
if (!shouldExecute) {
logger.info('While loop initial condition false, skipping loop body', { loopId })
logger.info('Loop initial condition false, skipping loop body', { loopId })
return {
sentinelStart: true,
shouldExit: true,
@@ -169,6 +169,17 @@ export class NodeExecutionOrchestrator {
this.parallelOrchestrator.initializeParallelScope(ctx, parallelId, nodesInParallel)
}
}
const scope = this.parallelOrchestrator.getParallelScope(ctx, parallelId)
if (scope?.isEmpty) {
logger.info('Parallel has empty distribution, skipping parallel body', { parallelId })
return {
sentinelStart: true,
shouldExit: true,
selectedRoute: EDGE.PARALLEL_EXIT,
}
}
return { sentinelStart: true }
}

View File

@@ -61,11 +61,13 @@ export class ParallelOrchestrator {
let items: any[] | undefined
let branchCount: number
let isEmpty = false
try {
const resolved = this.resolveBranchCount(ctx, parallelConfig)
const resolved = this.resolveBranchCount(ctx, parallelConfig, parallelId)
branchCount = resolved.branchCount
items = resolved.items
isEmpty = resolved.isEmpty ?? false
} catch (error) {
const errorMessage = `Parallel Items did not resolve: ${error instanceof Error ? error.message : String(error)}`
logger.error(errorMessage, { parallelId, distribution: parallelConfig.distribution })
@@ -91,6 +93,34 @@ export class ParallelOrchestrator {
throw new Error(branchError)
}
// Handle empty distribution - skip parallel body
if (isEmpty || branchCount === 0) {
const scope: ParallelScope = {
parallelId,
totalBranches: 0,
branchOutputs: new Map(),
completedCount: 0,
totalExpectedNodes: 0,
items: [],
isEmpty: true,
}
if (!ctx.parallelExecutions) {
ctx.parallelExecutions = new Map()
}
ctx.parallelExecutions.set(parallelId, scope)
// Set empty output for the parallel
this.state.setBlockOutput(parallelId, { results: [] })
logger.info('Parallel scope initialized with empty distribution, skipping body', {
parallelId,
branchCount: 0,
})
return scope
}
const { entryNodes } = this.expander.expandParallel(this.dag, parallelId, branchCount, items)
const scope: ParallelScope = {
@@ -127,15 +157,17 @@ export class ParallelOrchestrator {
private resolveBranchCount(
ctx: ExecutionContext,
config: SerializedParallel
): { branchCount: number; items?: any[] } {
config: SerializedParallel,
parallelId: string
): { branchCount: number; items?: any[]; isEmpty?: boolean } {
if (config.parallelType === 'count') {
return { branchCount: config.count ?? 1 }
}
const items = this.resolveDistributionItems(ctx, config)
if (items.length === 0) {
return { branchCount: config.count ?? 1 }
logger.info('Parallel has empty distribution, skipping parallel body', { parallelId })
return { branchCount: 0, items: [], isEmpty: true }
}
return { branchCount: items.length, items }

View File

@@ -8,7 +8,7 @@ const ivm = require('isolated-vm')
const USER_CODE_START_LINE = 4
const pendingFetches = new Map()
let fetchIdCounter = 0
const FETCH_TIMEOUT_MS = 30000
const FETCH_TIMEOUT_MS = 300000 // 5 minutes
/**
* Extract line and column from error stack or message

View File

@@ -34,6 +34,7 @@ export function layoutContainers(
: DEFAULT_CONTAINER_HORIZONTAL_SPACING,
verticalSpacing: options.verticalSpacing ?? DEFAULT_VERTICAL_SPACING,
padding: { x: CONTAINER_PADDING_X, y: CONTAINER_PADDING_Y },
gridSize: options.gridSize,
}
for (const [parentId, childIds] of children.entries()) {
@@ -56,18 +57,15 @@ export function layoutContainers(
continue
}
// Use the shared core layout function with container options
const { nodes, dimensions } = layoutBlocksCore(childBlocks, childEdges, {
isContainer: true,
layoutOptions: containerOptions,
})
// Apply positions back to blocks
for (const node of nodes.values()) {
blocks[node.id].position = node.position
}
// Update container dimensions
const calculatedWidth = dimensions.width
const calculatedHeight = dimensions.height

View File

@@ -9,6 +9,7 @@ import {
getBlockMetrics,
normalizePositions,
prepareBlockMetrics,
snapNodesToGrid,
} from '@/lib/workflows/autolayout/utils'
import { BLOCK_DIMENSIONS, HANDLE_POSITIONS } from '@/lib/workflows/blocks/block-dimensions'
import { EDGE } from '@/executor/constants'
@@ -84,7 +85,6 @@ export function assignLayers(
): Map<string, GraphNode> {
const nodes = new Map<string, GraphNode>()
// Initialize nodes
for (const [id, block] of Object.entries(blocks)) {
nodes.set(id, {
id,
@@ -97,7 +97,6 @@ export function assignLayers(
})
}
// Build a map of target node -> edges coming into it (to check sourceHandle later)
const incomingEdgesMap = new Map<string, Edge[]>()
for (const edge of edges) {
if (!incomingEdgesMap.has(edge.target)) {
@@ -106,7 +105,6 @@ export function assignLayers(
incomingEdgesMap.get(edge.target)!.push(edge)
}
// Build adjacency from edges
for (const edge of edges) {
const sourceNode = nodes.get(edge.source)
const targetNode = nodes.get(edge.target)
@@ -117,7 +115,6 @@ export function assignLayers(
}
}
// Find starter nodes (no incoming edges)
const starterNodes = Array.from(nodes.values()).filter((node) => node.incoming.size === 0)
if (starterNodes.length === 0 && nodes.size > 0) {
@@ -126,7 +123,6 @@ export function assignLayers(
logger.warn('No starter blocks found, using first block as starter', { blockId: firstNode.id })
}
// Topological sort using Kahn's algorithm
const inDegreeCount = new Map<string, number>()
for (const node of nodes.values()) {
@@ -144,8 +140,6 @@ export function assignLayers(
const node = nodes.get(nodeId)!
processed.add(nodeId)
// Calculate layer based on max incoming layer + 1
// For edges from subflow ends, add the subflow's internal depth (minus 1 to avoid double-counting)
if (node.incoming.size > 0) {
let maxEffectiveLayer = -1
const incomingEdges = incomingEdgesMap.get(nodeId) || []
@@ -153,16 +147,11 @@ export function assignLayers(
for (const incomingId of node.incoming) {
const incomingNode = nodes.get(incomingId)
if (incomingNode) {
// Find edges from this incoming node to check if it's a subflow end edge
const edgesFromSource = incomingEdges.filter((e) => e.source === incomingId)
let additionalDepth = 0
// Check if any edge from this source is a subflow end edge
const hasSubflowEndEdge = edgesFromSource.some(isSubflowEndEdge)
if (hasSubflowEndEdge && subflowDepths) {
// Get the internal depth of the subflow
// Subtract 1 because the +1 at the end of layer calculation already accounts for one layer
// E.g., if subflow has 2 internal layers (depth=2), we add 1 extra so total offset is 2
const depth = subflowDepths.get(incomingId) ?? 1
additionalDepth = Math.max(0, depth - 1)
}
@@ -174,7 +163,6 @@ export function assignLayers(
node.layer = maxEffectiveLayer + 1
}
// Add outgoing nodes when all dependencies processed
for (const targetId of node.outgoing) {
const currentCount = inDegreeCount.get(targetId) || 0
inDegreeCount.set(targetId, currentCount - 1)
@@ -185,7 +173,6 @@ export function assignLayers(
}
}
// Handle isolated nodes
for (const node of nodes.values()) {
if (!processed.has(node.id)) {
logger.debug('Isolated node detected, assigning to layer 0', { blockId: node.id })
@@ -224,7 +211,6 @@ function resolveVerticalOverlaps(nodes: GraphNode[], verticalSpacing: number): v
hasOverlap = false
iteration++
// Group nodes by layer for same-layer overlap resolution
const nodesByLayer = new Map<number, GraphNode[]>()
for (const node of nodes) {
if (!nodesByLayer.has(node.layer)) {
@@ -233,11 +219,9 @@ function resolveVerticalOverlaps(nodes: GraphNode[], verticalSpacing: number): v
nodesByLayer.get(node.layer)!.push(node)
}
// Process each layer independently
for (const [layer, layerNodes] of nodesByLayer) {
if (layerNodes.length < 2) continue
// Sort by Y position for consistent processing
layerNodes.sort((a, b) => a.position.y - b.position.y)
for (let i = 0; i < layerNodes.length - 1; i++) {
@@ -302,7 +286,6 @@ export function calculatePositions(
const layerNumbers = Array.from(layers.keys()).sort((a, b) => a - b)
// Calculate max width for each layer
const layerWidths = new Map<number, number>()
for (const layerNum of layerNumbers) {
const nodesInLayer = layers.get(layerNum)!
@@ -310,7 +293,6 @@ export function calculatePositions(
layerWidths.set(layerNum, maxWidth)
}
// Calculate cumulative X positions for each layer based on actual widths
const layerXPositions = new Map<number, number>()
let cumulativeX = padding.x
@@ -319,7 +301,6 @@ export function calculatePositions(
cumulativeX += layerWidths.get(layerNum)! + horizontalSpacing
}
// Build a flat map of all nodes for quick lookups
const allNodes = new Map<string, GraphNode>()
for (const nodesInLayer of layers.values()) {
for (const node of nodesInLayer) {
@@ -327,7 +308,6 @@ export function calculatePositions(
}
}
// Build incoming edges map for handle lookups
const incomingEdgesMap = new Map<string, Edge[]>()
for (const edge of edges) {
if (!incomingEdgesMap.has(edge.target)) {
@@ -336,20 +316,16 @@ export function calculatePositions(
incomingEdgesMap.get(edge.target)!.push(edge)
}
// Position nodes layer by layer, aligning with connected predecessors
for (const layerNum of layerNumbers) {
const nodesInLayer = layers.get(layerNum)!
const xPosition = layerXPositions.get(layerNum)!
// Separate containers and non-containers
const containersInLayer = nodesInLayer.filter(isContainerBlock)
const nonContainersInLayer = nodesInLayer.filter((n) => !isContainerBlock(n))
// For the first layer (layer 0), position sequentially from padding.y
if (layerNum === 0) {
let yOffset = padding.y
// Sort containers by height for visual balance
containersInLayer.sort((a, b) => b.metrics.height - a.metrics.height)
for (const node of containersInLayer) {
@@ -361,7 +337,6 @@ export function calculatePositions(
yOffset += CONTAINER_VERTICAL_CLEARANCE
}
// Sort non-containers by outgoing connections
nonContainersInLayer.sort((a, b) => b.outgoing.size - a.outgoing.size)
for (const node of nonContainersInLayer) {
@@ -371,9 +346,7 @@ export function calculatePositions(
continue
}
// For subsequent layers, align with connected predecessors (handle-to-handle)
for (const node of [...containersInLayer, ...nonContainersInLayer]) {
// Find the bottommost predecessor handle Y (highest value) and align to it
let bestSourceHandleY = -1
let bestEdge: Edge | null = null
const incomingEdges = incomingEdgesMap.get(node.id) || []
@@ -381,7 +354,6 @@ export function calculatePositions(
for (const edge of incomingEdges) {
const predecessor = allNodes.get(edge.source)
if (predecessor) {
// Calculate actual source handle Y position based on block type and handle
const sourceHandleOffset = getSourceHandleYOffset(predecessor.block, edge.sourceHandle)
const sourceHandleY = predecessor.position.y + sourceHandleOffset
@@ -392,20 +364,16 @@ export function calculatePositions(
}
}
// If no predecessors found (shouldn't happen for layer > 0), use padding
if (bestSourceHandleY < 0) {
bestSourceHandleY = padding.y + HANDLE_POSITIONS.DEFAULT_Y_OFFSET
}
// Calculate the target handle Y offset for this node
const targetHandleOffset = getTargetHandleYOffset(node.block, bestEdge?.targetHandle)
// Position node so its target handle aligns with the source handle Y
node.position = { x: xPosition, y: bestSourceHandleY - targetHandleOffset }
}
}
// Resolve vertical overlaps within layers (X overlaps prevented by cumulative positioning)
resolveVerticalOverlaps(Array.from(layers.values()).flat(), verticalSpacing)
}
@@ -435,7 +403,7 @@ export function layoutBlocksCore(
return { nodes: new Map(), dimensions: { width: 0, height: 0 } }
}
const layoutOptions =
const layoutOptions: LayoutOptions =
options.layoutOptions ??
(options.isContainer ? CONTAINER_LAYOUT_OPTIONS : DEFAULT_LAYOUT_OPTIONS)
@@ -452,7 +420,13 @@ export function layoutBlocksCore(
calculatePositions(layers, edges, layoutOptions)
// 5. Normalize positions
const dimensions = normalizePositions(nodes, { isContainer: options.isContainer })
let dimensions = normalizePositions(nodes, { isContainer: options.isContainer })
// 6. Snap to grid if gridSize is specified (recalculates dimensions)
const snappedDimensions = snapNodesToGrid(nodes, layoutOptions.gridSize)
if (snappedDimensions) {
dimensions = snappedDimensions
}
return { nodes, dimensions }
}

View File

@@ -36,14 +36,13 @@ export function applyAutoLayout(
const horizontalSpacing = options.horizontalSpacing ?? DEFAULT_HORIZONTAL_SPACING
const verticalSpacing = options.verticalSpacing ?? DEFAULT_VERTICAL_SPACING
// Pre-calculate container dimensions by laying out their children (bottom-up)
// This ensures accurate widths/heights before root-level layout
prepareContainerDimensions(
blocksCopy,
edges,
layoutBlocksCore,
horizontalSpacing,
verticalSpacing
verticalSpacing,
options.gridSize
)
const { root: rootBlockIds } = getBlocksByParent(blocksCopy)
@@ -58,8 +57,6 @@ export function applyAutoLayout(
(edge) => layoutRootIds.includes(edge.source) && layoutRootIds.includes(edge.target)
)
// Calculate subflow depths before laying out root blocks
// This ensures blocks connected to subflow ends are positioned correctly
const subflowDepths = calculateSubflowDepths(blocksCopy, edges, assignLayers)
if (Object.keys(rootBlocks).length > 0) {
@@ -95,13 +92,12 @@ export function applyAutoLayout(
}
export type { TargetedLayoutOptions } from '@/lib/workflows/autolayout/targeted'
// Function exports
export { applyTargetedLayout } from '@/lib/workflows/autolayout/targeted'
// Type exports
export type { Edge, LayoutOptions, LayoutResult } from '@/lib/workflows/autolayout/types'
export {
getBlockMetrics,
isContainerType,
shouldSkipAutoLayout,
snapPositionToGrid,
transferBlockHeights,
} from '@/lib/workflows/autolayout/utils'

View File

@@ -1,4 +1,3 @@
import { createLogger } from '@sim/logger'
import {
CONTAINER_PADDING,
DEFAULT_HORIZONTAL_SPACING,
@@ -14,12 +13,11 @@ import {
isContainerType,
prepareContainerDimensions,
shouldSkipAutoLayout,
snapPositionToGrid,
} from '@/lib/workflows/autolayout/utils'
import { CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
import type { BlockState } from '@/stores/workflows/workflow/types'
const logger = createLogger('AutoLayout:Targeted')
export interface TargetedLayoutOptions extends LayoutOptions {
changedBlockIds: string[]
verticalSpacing?: number
@@ -39,6 +37,7 @@ export function applyTargetedLayout(
changedBlockIds,
verticalSpacing = DEFAULT_VERTICAL_SPACING,
horizontalSpacing = DEFAULT_HORIZONTAL_SPACING,
gridSize,
} = options
if (!changedBlockIds || changedBlockIds.length === 0) {
@@ -48,19 +47,17 @@ export function applyTargetedLayout(
const changedSet = new Set(changedBlockIds)
const blocksCopy: Record<string, BlockState> = JSON.parse(JSON.stringify(blocks))
// Pre-calculate container dimensions by laying out their children (bottom-up)
// This ensures accurate widths/heights before root-level layout
prepareContainerDimensions(
blocksCopy,
edges,
layoutBlocksCore,
horizontalSpacing,
verticalSpacing
verticalSpacing,
gridSize
)
const groups = getBlocksByParent(blocksCopy)
// Calculate subflow depths before layout to properly position blocks after subflow ends
const subflowDepths = calculateSubflowDepths(blocksCopy, edges, assignLayers)
layoutGroup(
@@ -71,7 +68,8 @@ export function applyTargetedLayout(
changedSet,
verticalSpacing,
horizontalSpacing,
subflowDepths
subflowDepths,
gridSize
)
for (const [parentId, childIds] of groups.children.entries()) {
@@ -83,7 +81,8 @@ export function applyTargetedLayout(
changedSet,
verticalSpacing,
horizontalSpacing,
subflowDepths
subflowDepths,
gridSize
)
}
@@ -101,7 +100,8 @@ function layoutGroup(
changedSet: Set<string>,
verticalSpacing: number,
horizontalSpacing: number,
subflowDepths: Map<string, number>
subflowDepths: Map<string, number>,
gridSize?: number
): void {
if (childIds.length === 0) return
@@ -116,7 +116,6 @@ function layoutGroup(
return
}
// Determine which blocks need repositioning
const requestedLayout = layoutEligibleChildIds.filter((id) => {
const block = blocks[id]
if (!block) return false
@@ -141,7 +140,6 @@ function layoutGroup(
return
}
// Store old positions for anchor calculation
const oldPositions = new Map<string, { x: number; y: number }>()
for (const id of layoutEligibleChildIds) {
const block = blocks[id]
@@ -149,8 +147,6 @@ function layoutGroup(
oldPositions.set(id, { ...block.position })
}
// Compute layout positions using core function
// Only pass subflowDepths for root-level layout (not inside containers)
const layoutPositions = computeLayoutPositions(
layoutEligibleChildIds,
blocks,
@@ -158,7 +154,8 @@ function layoutGroup(
parentBlock,
horizontalSpacing,
verticalSpacing,
parentId === null ? subflowDepths : undefined
parentId === null ? subflowDepths : undefined,
gridSize
)
if (layoutPositions.size === 0) {
@@ -168,7 +165,6 @@ function layoutGroup(
return
}
// Find anchor block (unchanged block with a layout position)
let offsetX = 0
let offsetY = 0
@@ -185,20 +181,16 @@ function layoutGroup(
}
}
// Apply new positions only to blocks that need layout
for (const id of needsLayout) {
const block = blocks[id]
const newPos = layoutPositions.get(id)
if (!block || !newPos) continue
block.position = {
x: newPos.x + offsetX,
y: newPos.y + offsetY,
}
block.position = snapPositionToGrid({ x: newPos.x + offsetX, y: newPos.y + offsetY }, gridSize)
}
}
/**
* Computes layout positions for a subset of blocks using the core layout
* Computes layout positions for a subset of blocks using the core layout function
*/
function computeLayoutPositions(
childIds: string[],
@@ -207,7 +199,8 @@ function computeLayoutPositions(
parentBlock: BlockState | undefined,
horizontalSpacing: number,
verticalSpacing: number,
subflowDepths?: Map<string, number>
subflowDepths?: Map<string, number>,
gridSize?: number
): Map<string, { x: number; y: number }> {
const subsetBlocks: Record<string, BlockState> = {}
for (const id of childIds) {
@@ -228,11 +221,11 @@ function computeLayoutPositions(
layoutOptions: {
horizontalSpacing: isContainer ? horizontalSpacing * 0.85 : horizontalSpacing,
verticalSpacing,
gridSize,
},
subflowDepths,
})
// Update parent container dimensions if applicable
if (parentBlock) {
parentBlock.data = {
...parentBlock.data,
@@ -241,7 +234,6 @@ function computeLayoutPositions(
}
}
// Convert nodes to position map
const positions = new Map<string, { x: number; y: number }>()
for (const node of nodes.values()) {
positions.set(node.id, { x: node.position.x, y: node.position.y })

View File

@@ -7,6 +7,7 @@ export interface LayoutOptions {
horizontalSpacing?: number
verticalSpacing?: number
padding?: { x: number; y: number }
gridSize?: number
}
export interface LayoutResult {

View File

@@ -18,6 +18,61 @@ function resolveNumeric(value: number | undefined, fallback: number): number {
return typeof value === 'number' && Number.isFinite(value) ? value : fallback
}
/**
* Snaps a single coordinate value to the nearest grid position
*/
function snapToGrid(value: number, gridSize: number): number {
return Math.round(value / gridSize) * gridSize
}
/**
* Snaps a position to the nearest grid point.
* Returns the original position if gridSize is 0 or not provided.
*/
export function snapPositionToGrid(
position: { x: number; y: number },
gridSize: number | undefined
): { x: number; y: number } {
if (!gridSize || gridSize <= 0) {
return position
}
return {
x: snapToGrid(position.x, gridSize),
y: snapToGrid(position.y, gridSize),
}
}
/**
* Snaps all node positions in a graph to grid positions and returns updated dimensions.
* Returns null if gridSize is not set or no snapping was needed.
*/
export function snapNodesToGrid(
nodes: Map<string, GraphNode>,
gridSize: number | undefined
): { width: number; height: number } | null {
if (!gridSize || gridSize <= 0 || nodes.size === 0) {
return null
}
let minX = Number.POSITIVE_INFINITY
let minY = Number.POSITIVE_INFINITY
let maxX = Number.NEGATIVE_INFINITY
let maxY = Number.NEGATIVE_INFINITY
for (const node of nodes.values()) {
node.position = snapPositionToGrid(node.position, gridSize)
minX = Math.min(minX, node.position.x)
minY = Math.min(minY, node.position.y)
maxX = Math.max(maxX, node.position.x + node.metrics.width)
maxY = Math.max(maxY, node.position.y + node.metrics.height)
}
return {
width: maxX - minX + CONTAINER_PADDING * 2,
height: maxY - minY + CONTAINER_PADDING * 2,
}
}
/**
* Checks if a block type is a container (loop or parallel)
*/
@@ -314,6 +369,7 @@ export type LayoutFunction = (
horizontalSpacing?: number
verticalSpacing?: number
padding?: { x: number; y: number }
gridSize?: number
}
subflowDepths?: Map<string, number>
}
@@ -329,13 +385,15 @@ export type LayoutFunction = (
* @param layoutFn - The layout function to use for calculating dimensions
* @param horizontalSpacing - Horizontal spacing between blocks
* @param verticalSpacing - Vertical spacing between blocks
* @param gridSize - Optional grid size for snap-to-grid
*/
export function prepareContainerDimensions(
blocks: Record<string, BlockState>,
edges: Edge[],
layoutFn: LayoutFunction,
horizontalSpacing: number,
verticalSpacing: number
verticalSpacing: number,
gridSize?: number
): void {
const { children } = getBlocksByParent(blocks)
@@ -402,6 +460,7 @@ export function prepareContainerDimensions(
layoutOptions: {
horizontalSpacing: horizontalSpacing * 0.85,
verticalSpacing,
gridSize,
},
})

View File

@@ -102,7 +102,7 @@ export const azureOpenAIProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity

View File

@@ -77,7 +77,7 @@ export const cerebrasProvider: ProviderConfig = {
messages: allMessages,
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {
type: 'json_schema',

View File

@@ -81,7 +81,7 @@ export const deepseekProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null

View File

@@ -349,7 +349,7 @@ export async function executeGeminiRequest(
if (request.temperature !== undefined) {
geminiConfig.temperature = request.temperature
}
if (request.maxTokens !== undefined) {
if (request.maxTokens != null) {
geminiConfig.maxOutputTokens = request.maxTokens
}
if (systemInstruction) {

View File

@@ -123,17 +123,21 @@ export function extractFunctionCallPart(candidate: Candidate | undefined): Part
}
/**
* Converts usage metadata from SDK response to our format
* Converts usage metadata from SDK response to our format.
* Per Gemini docs, total = promptTokenCount + candidatesTokenCount + toolUsePromptTokenCount + thoughtsTokenCount
* We include toolUsePromptTokenCount in input and thoughtsTokenCount in output for correct billing.
*/
export function convertUsageMetadata(
usageMetadata: GenerateContentResponseUsageMetadata | undefined
): GeminiUsage {
const promptTokenCount = usageMetadata?.promptTokenCount ?? 0
const candidatesTokenCount = usageMetadata?.candidatesTokenCount ?? 0
const thoughtsTokenCount = usageMetadata?.thoughtsTokenCount ?? 0
const toolUsePromptTokenCount = usageMetadata?.toolUsePromptTokenCount ?? 0
const promptTokenCount = (usageMetadata?.promptTokenCount ?? 0) + toolUsePromptTokenCount
const candidatesTokenCount = (usageMetadata?.candidatesTokenCount ?? 0) + thoughtsTokenCount
return {
promptTokenCount,
candidatesTokenCount,
totalTokenCount: usageMetadata?.totalTokenCount ?? promptTokenCount + candidatesTokenCount,
totalTokenCount: usageMetadata?.totalTokenCount ?? 0,
}
}

View File

@@ -74,7 +74,7 @@ export const groqProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -91,7 +91,7 @@ export const mistralProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -1130,7 +1130,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'cerebras',
name: 'Cerebras',
description: 'Cerebras Cloud LLMs',
defaultModel: 'cerebras/llama-3.3-70b',
defaultModel: 'cerebras/gpt-oss-120b',
modelPatterns: [/^cerebras/],
icon: CerebrasIcon,
capabilities: {
@@ -1138,44 +1138,64 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
models: [
{
id: 'cerebras/llama-3.1-8b',
id: 'cerebras/gpt-oss-120b',
pricing: {
input: 0.35,
output: 0.75,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
{
id: 'cerebras/llama3.1-8b',
pricing: {
input: 0.1,
output: 0.1,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 32000,
},
{
id: 'cerebras/llama-3.1-70b',
pricing: {
input: 0.6,
output: 0.6,
updatedAt: '2025-10-11',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'cerebras/llama-3.3-70b',
pricing: {
input: 0.6,
output: 0.6,
updatedAt: '2025-10-11',
input: 0.85,
output: 1.2,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'cerebras/llama-4-scout-17b-16e-instruct',
id: 'cerebras/qwen-3-32b',
pricing: {
input: 0.11,
output: 0.34,
updatedAt: '2025-10-11',
input: 0.4,
output: 0.8,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 10000000,
contextWindow: 131000,
},
{
id: 'cerebras/qwen-3-235b-a22b-instruct-2507',
pricing: {
input: 0.6,
output: 1.2,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
{
id: 'cerebras/zai-glm-4.7',
pricing: {
input: 2.25,
output: 2.75,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
],
},
@@ -1194,8 +1214,8 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'groq/openai/gpt-oss-120b',
pricing: {
input: 0.15,
output: 0.75,
updatedAt: '2025-10-11',
output: 0.6,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1203,9 +1223,29 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
{
id: 'groq/openai/gpt-oss-20b',
pricing: {
input: 0.01,
output: 0.25,
updatedAt: '2025-10-11',
input: 0.075,
output: 0.3,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/openai/gpt-oss-safeguard-20b',
pricing: {
input: 0.075,
output: 0.3,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/qwen/qwen3-32b',
pricing: {
input: 0.29,
output: 0.59,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1215,7 +1255,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.05,
output: 0.08,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1225,27 +1265,17 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.59,
output: 0.79,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/llama-4-scout-17b-instruct',
id: 'groq/meta-llama/llama-4-scout-17b-16e-instruct',
pricing: {
input: 0.11,
output: 0.34,
updatedAt: '2025-10-11',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/llama-4-maverick-17b-instruct',
pricing: {
input: 0.5,
output: 0.77,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1253,9 +1283,9 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
{
id: 'groq/meta-llama/llama-4-maverick-17b-128e-instruct',
pricing: {
input: 0.5,
output: 0.77,
updatedAt: '2025-10-11',
input: 0.2,
output: 0.6,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1265,7 +1295,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.04,
output: 0.04,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 8192,
@@ -1275,27 +1305,37 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.59,
output: 0.79,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'groq/moonshotai/kimi-k2-instruct',
id: 'groq/deepseek-r1-distill-qwen-32b',
pricing: {
input: 0.69,
output: 0.69,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'groq/moonshotai/kimi-k2-instruct-0905',
pricing: {
input: 1.0,
output: 3.0,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
contextWindow: 262144,
},
{
id: 'groq/meta-llama/llama-guard-4-12b',
pricing: {
input: 0.2,
output: 0.2,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,

View File

@@ -105,7 +105,7 @@ export const ollamaProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -81,7 +81,7 @@ export const openaiProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity

View File

@@ -121,7 +121,7 @@ export const openRouterProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
let hasActiveTools = false
@@ -516,7 +516,7 @@ export const openRouterProvider: ProviderConfig = {
return streamingResult as StreamingExecution
}
if (request.responseFormat && hasActiveTools && toolCalls.length > 0) {
if (request.responseFormat && hasActiveTools) {
const finalPayload: any = {
model: payload.model,
messages: [...currentMessages],

View File

@@ -135,7 +135,7 @@ export const vllmProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -92,7 +92,7 @@ export const xAIProvider: ProviderConfig = {
}
if (request.temperature !== undefined) basePayload.temperature = request.temperature
if (request.maxTokens !== undefined) basePayload.max_tokens = request.maxTokens
if (request.maxTokens != null) basePayload.max_completion_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
if (tools?.length) {

View File

@@ -1,15 +1,155 @@
import { RepeatIcon, SplitIcon } from 'lucide-react'
import { create } from 'zustand'
import type { SearchModalState } from './types'
import { devtools } from 'zustand/middleware'
import { getToolOperationsIndex } from '@/lib/search/tool-operations'
import { getTriggersForSidebar } from '@/lib/workflows/triggers/trigger-utils'
import { getAllBlocks } from '@/blocks'
import type {
SearchBlockItem,
SearchData,
SearchDocItem,
SearchModalState,
SearchToolOperationItem,
} from './types'
export const useSearchModalStore = create<SearchModalState>((set) => ({
isOpen: false,
setOpen: (open: boolean) => {
set({ isOpen: open })
},
open: () => {
set({ isOpen: true })
},
close: () => {
set({ isOpen: false })
},
}))
const initialData: SearchData = {
blocks: [],
tools: [],
triggers: [],
toolOperations: [],
docs: [],
isInitialized: false,
}
export const useSearchModalStore = create<SearchModalState>()(
devtools(
(set, get) => ({
isOpen: false,
data: initialData,
setOpen: (open: boolean) => {
set({ isOpen: open })
},
open: () => {
set({ isOpen: true })
},
close: () => {
set({ isOpen: false })
},
initializeData: (filterBlocks) => {
const allBlocks = getAllBlocks()
const filteredAllBlocks = filterBlocks(allBlocks) as typeof allBlocks
const regularBlocks: SearchBlockItem[] = []
const tools: SearchBlockItem[] = []
const docs: SearchDocItem[] = []
for (const block of filteredAllBlocks) {
if (block.hideFromToolbar) continue
const searchItem: SearchBlockItem = {
id: block.type,
name: block.name,
description: block.description || '',
icon: block.icon,
bgColor: block.bgColor || '#6B7280',
type: block.type,
}
if (block.category === 'blocks' && block.type !== 'starter') {
regularBlocks.push(searchItem)
} else if (block.category === 'tools') {
tools.push(searchItem)
}
if (block.docsLink) {
docs.push({
id: `docs-${block.type}`,
name: block.name,
icon: block.icon,
href: block.docsLink,
})
}
}
const specialBlocks: SearchBlockItem[] = [
{
id: 'loop',
name: 'Loop',
description: 'Create a Loop',
icon: RepeatIcon,
bgColor: '#2FB3FF',
type: 'loop',
},
{
id: 'parallel',
name: 'Parallel',
description: 'Parallel Execution',
icon: SplitIcon,
bgColor: '#FEE12B',
type: 'parallel',
},
]
const blocks = [...regularBlocks, ...(filterBlocks(specialBlocks) as SearchBlockItem[])]
const allTriggers = getTriggersForSidebar()
const filteredTriggers = filterBlocks(allTriggers) as typeof allTriggers
const priorityOrder = ['Start', 'Schedule', 'Webhook']
const sortedTriggers = [...filteredTriggers].sort((a, b) => {
const aIndex = priorityOrder.indexOf(a.name)
const bIndex = priorityOrder.indexOf(b.name)
const aHasPriority = aIndex !== -1
const bHasPriority = bIndex !== -1
if (aHasPriority && bHasPriority) return aIndex - bIndex
if (aHasPriority) return -1
if (bHasPriority) return 1
return a.name.localeCompare(b.name)
})
const triggers = sortedTriggers.map(
(block): SearchBlockItem => ({
id: block.type,
name: block.name,
description: block.description || '',
icon: block.icon,
bgColor: block.bgColor || '#6B7280',
type: block.type,
config: block,
})
)
const allowedBlockTypes = new Set(tools.map((t) => t.type))
const toolOperations: SearchToolOperationItem[] = getToolOperationsIndex()
.filter((op) => allowedBlockTypes.has(op.blockType))
.map((op) => ({
id: op.id,
name: op.operationName,
searchValue: `${op.serviceName} ${op.operationName}`,
icon: op.icon,
bgColor: op.bgColor,
blockType: op.blockType,
operationId: op.operationId,
keywords: op.aliases,
}))
set({
data: {
blocks,
tools,
triggers,
toolOperations,
docs,
isInitialized: true,
},
})
},
}),
{ name: 'search-modal-store' }
)
)

View File

@@ -1,3 +1,55 @@
import type { ComponentType } from 'react'
import type { BlockConfig } from '@/blocks/types'
/**
* Represents a block item in the search results.
*/
export interface SearchBlockItem {
id: string
name: string
description: string
icon: ComponentType<{ className?: string }>
bgColor: string
type: string
config?: BlockConfig
}
/**
* Represents a tool operation item in the search results.
*/
export interface SearchToolOperationItem {
id: string
name: string
searchValue: string
icon: ComponentType<{ className?: string }>
bgColor: string
blockType: string
operationId: string
keywords: string[]
}
/**
* Represents a doc item in the search results.
*/
export interface SearchDocItem {
id: string
name: string
icon: ComponentType<{ className?: string }>
href: string
}
/**
* Pre-computed search data that is initialized on app load.
*/
export interface SearchData {
blocks: SearchBlockItem[]
tools: SearchBlockItem[]
triggers: SearchBlockItem[]
toolOperations: SearchToolOperationItem[]
docs: SearchDocItem[]
isInitialized: boolean
}
/**
* Global state for the universal search modal.
*
@@ -8,18 +60,27 @@
export interface SearchModalState {
/** Whether the search modal is currently open. */
isOpen: boolean
/** Pre-computed search data. */
data: SearchData
/**
* Explicitly set the open state of the modal.
*
* @param open - New open state.
*/
setOpen: (open: boolean) => void
/**
* Convenience method to open the modal.
*/
open: () => void
/**
* Convenience method to close the modal.
*/
close: () => void
/**
* Initialize search data. Called once on app load.
*/
initializeData: (filterBlocks: <T extends { type: string }>(blocks: T[]) => T[]) => void
}

View File

@@ -253,23 +253,6 @@ describe('executeTool Function', () => {
vi.restoreAllMocks()
})
it('should handle errors from tools', async () => {
setupFetchMock({ status: 400, ok: false, json: { error: 'Bad request' } })
const result = await executeTool(
'http_request',
{
url: 'https://api.example.com/data',
method: 'GET',
},
true
)
expect(result.success).toBe(false)
expect(result.error).toBeDefined()
expect(result.timing).toBeDefined()
})
it('should add timing information to results', async () => {
const result = await executeTool(
'http_request',