Compare commits

...

15 Commits

Author SHA1 Message Date
waleed
0603101d75 fix(mcp): correct event handler type for onInput 2026-01-28 00:59:23 -08:00
waleed
c4d0fc31cc fix: move generic to function parameter position 2026-01-28 00:57:53 -08:00
waleed
19bc4afcc9 fix: correct filterBlocks type signature 2026-01-28 00:30:52 -08:00
waleed
6901b15260 fix: include keywords in search filter + show service name in tool operations 2026-01-28 00:24:55 -08:00
waleed
fe72c69d44 fix: allow search data re-initialization when permissions change 2026-01-27 23:53:05 -08:00
waleed
1709e1f81f chore: add devtools middleware to search modal store 2026-01-27 23:52:31 -08:00
waleed
e02c156d75 chore: remove comments 2026-01-27 23:44:34 -08:00
waleed
9506fea20d chore: remove unrelated workflow.tsx changes 2026-01-27 23:43:53 -08:00
waleed
6494f614b4 improvement(cmdk): refactor search modal to use cmdk + fix icon SVG IDs 2026-01-27 23:42:43 -08:00
Vikhyath Mondreti
c8ffda1616 fix(gemini): token count (#3039)
* fix(gemini): token count

* fix to include tool call tokens
2026-01-27 19:16:54 -08:00
Waleed
b4a389a71f improvement(helm): update GPU device plugin and add cert-manager issuers (#3036)
* improvement(helm): update GPU device plugin and add cert-manager issuers

* fix(helm): address code review feedback for GPU plugin and cert-manager

* fix(helm): remove duplicate nodeSelector, add hook for CA issuer ordering

* fix(helm): remove incorrect hook, CA issuer auto-reconciles
2026-01-27 18:25:08 -08:00
Vikhyath Mondreti
65bc21608c improvement(block-inputs): must parse json accurately + models max_tokens fix (#3033)
* improvement(block-inputs): must parse json accurately

* fix sheets typing

* add reference comment

* fix models

* revert blocks changes

* fix param to follow openai new convention
2026-01-27 18:17:35 -08:00
Waleed
ef613ef035 fix(models): update cerebras and groq models (#3038) 2026-01-27 18:12:48 -08:00
Waleed
20b76e67b3 improvement(skills): extend skills (#3035) 2026-01-27 17:58:58 -08:00
Waleed
7640fdf742 feat(autolayout): add snap-to-grid support (#3031)
* feat(autolayout): add snap-to-grid support

* fix(autolayout): recalculate dimensions after grid snapping

* fix(autolayout): correct dimension calculation and propagate gridSize
2026-01-27 17:02:27 -08:00
36 changed files with 1502 additions and 1511 deletions

View File

@@ -55,21 +55,21 @@ export const {serviceName}{Action}Tool: ToolConfig<
},
params: {
// Hidden params (system-injected)
// Hidden params (system-injected, only use hidden for oauth accessToken)
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token',
},
// User-only params (credentials, IDs user must provide)
// User-only params (credentials, api key, IDs user must provide)
someId: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'The ID of the resource',
},
// User-or-LLM params (can be provided by user OR computed by LLM)
// User-or-LLM params (everything else, can be provided by user OR computed by LLM)
query: {
type: 'string',
required: false, // Use false for optional
@@ -114,8 +114,8 @@ export const {serviceName}{Action}Tool: ToolConfig<
### Visibility Options
- `'hidden'` - System-injected (OAuth tokens, internal params). User never sees.
- `'user-only'` - User must provide (credentials, account-specific IDs)
- `'user-or-llm'` - User provides OR LLM can compute (search queries, content, filters)
- `'user-only'` - User must provide (credentials, api keys, account-specific IDs)
- `'user-or-llm'` - User provides OR LLM can compute (search queries, content, filters, most fall into this category)
### Parameter Types
- `'string'` - Text values

File diff suppressed because one or more lines are too long

View File

@@ -35,8 +35,7 @@ const AutoLayoutRequestSchema = z.object({
})
.optional()
.default({}),
// Optional: if provided, use these blocks instead of loading from DB
// This allows using blocks with live measurements from the UI
gridSize: z.number().min(0).max(50).optional(),
blocks: z.record(z.any()).optional(),
edges: z.array(z.any()).optional(),
loops: z.record(z.any()).optional(),
@@ -53,7 +52,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
const { id: workflowId } = await params
try {
// Get the session
const session = await getSession()
if (!session?.user?.id) {
logger.warn(`[${requestId}] Unauthorized autolayout attempt for workflow ${workflowId}`)
@@ -62,7 +60,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
const userId = session.user.id
// Parse request body
const body = await request.json()
const layoutOptions = AutoLayoutRequestSchema.parse(body)
@@ -70,7 +67,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
userId,
})
// Fetch the workflow to check ownership/access
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow
@@ -79,7 +75,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
}
// Check if user has permission to update this workflow
const canUpdate =
accessContext?.isOwner ||
(workflowData.workspaceId
@@ -94,8 +89,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}
// Use provided blocks/edges if available (with live measurements from UI),
// otherwise load from database
let currentWorkflowData: NormalizedWorkflowData | null
if (layoutOptions.blocks && layoutOptions.edges) {
@@ -125,6 +118,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
y: layoutOptions.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
},
alignment: layoutOptions.alignment,
gridSize: layoutOptions.gridSize,
}
const layoutResult = applyAutoLayout(

View File

@@ -1,108 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { generateRequestId } from '@/lib/core/utils/request'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import {
  DEFAULT_HORIZONTAL_SPACING,
  DEFAULT_LAYOUT_PADDING,
  DEFAULT_VERTICAL_SPACING,
} from '@/lib/workflows/autolayout/constants'

const logger = createLogger('YamlAutoLayoutAPI')

/** Request payload: a workflow snapshot plus optional layout tuning options. */
const AutoLayoutRequestSchema = z.object({
  workflowState: z.object({
    blocks: z.record(z.any()),
    edges: z.array(z.any()),
    loops: z.record(z.any()).optional().default({}),
    parallels: z.record(z.any()).optional().default({}),
  }),
  options: z
    .object({
      spacing: z
        .object({
          horizontal: z.number().optional(),
          vertical: z.number().optional(),
        })
        .optional(),
      alignment: z.enum(['start', 'center', 'end']).optional(),
      padding: z
        .object({
          x: z.number().optional(),
          y: z.number().optional(),
        })
        .optional(),
    })
    .optional(),
})

/**
 * Applies auto layout to the supplied workflow state and returns the
 * re-positioned blocks alongside the untouched edges/loops/parallels.
 * Any failure (validation, layout, unexpected error) yields HTTP 500
 * with `{ success: false, errors: [...] }`.
 */
export async function POST(request: NextRequest) {
  const requestId = generateRequestId()

  // Every failure path shares the same response shape.
  const fail = (errors: string[]) => NextResponse.json({ success: false, errors }, { status: 500 })

  try {
    const payload = await request.json()
    const { workflowState, options } = AutoLayoutRequestSchema.parse(payload)

    logger.info(`[${requestId}] Applying auto layout`, {
      blockCount: Object.keys(workflowState.blocks).length,
      edgeCount: workflowState.edges.length,
    })

    // Fill in defaults for any layout knobs the caller left unset.
    const layoutSettings = {
      horizontalSpacing: options?.spacing?.horizontal ?? DEFAULT_HORIZONTAL_SPACING,
      verticalSpacing: options?.spacing?.vertical ?? DEFAULT_VERTICAL_SPACING,
      padding: {
        x: options?.padding?.x ?? DEFAULT_LAYOUT_PADDING.x,
        y: options?.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
      },
      alignment: options?.alignment ?? 'center',
    }

    const result = applyAutoLayout(workflowState.blocks, workflowState.edges, layoutSettings)

    if (!result.success || !result.blocks) {
      logger.error(`[${requestId}] Auto layout failed:`, {
        error: result.error,
      })
      return fail([result.error || 'Unknown auto layout error'])
    }

    logger.info(`[${requestId}] Auto layout completed successfully:`, {
      success: true,
      blockCount: Object.keys(result.blocks).length,
    })

    // Only block positions change; everything else is echoed back untouched.
    return NextResponse.json({
      success: true,
      workflowState: {
        blocks: result.blocks,
        edges: workflowState.edges,
        loops: workflowState.loops || {},
        parallels: workflowState.parallels || {},
      },
    })
  } catch (error) {
    logger.error(`[${requestId}] Auto layout failed:`, error)
    return fail([error instanceof Error ? error.message : 'Unknown auto layout error'])
  }
}

View File

@@ -1,241 +0,0 @@
/**
 * Search utility functions for tiered matching algorithm
 * Provides predictable search results prioritizing exact matches over fuzzy matches
 */

/** Minimum shape an item must have to be searchable. */
export interface SearchableItem {
  id: string
  name: string
  // Optional longer text; matches here are down-weighted via DESCRIPTION_WEIGHT.
  description?: string
  type: string
  // Alternative names the item may be found under (see calculateAliasScore).
  aliases?: string[]
  [key: string]: any
}

/** A single scored hit produced by searchItems. */
export interface SearchResult<T extends SearchableItem> {
  item: T
  // Higher is more relevant; results are sorted by this value descending.
  score: number
  // Which matching tier produced the winning score.
  matchType: 'exact' | 'prefix' | 'alias' | 'word-boundary' | 'substring' | 'description'
}

// Base scores for each matching tier, ordered strongest to weakest.
const SCORE_EXACT_MATCH = 10000
const SCORE_PREFIX_MATCH = 5000
const SCORE_ALIAS_MATCH = 3000
const SCORE_WORD_BOUNDARY = 1000
const SCORE_SUBSTRING_MATCH = 100
// Multiplier applied to description-field scores so name matches win ties.
const DESCRIPTION_WEIGHT = 0.3
/**
 * Calculate match score for a single field
 * Returns 0 if no match found
 */
function calculateFieldScore(
  query: string,
  field: string
): {
  score: number
  matchType: 'exact' | 'prefix' | 'word-boundary' | 'substring' | null
} {
  const q = query.toLowerCase().trim()
  const f = field.toLowerCase().trim()

  // Nothing to compare against.
  if (!q || !f) {
    return { score: 0, matchType: null }
  }

  // Tier 1: the field is exactly the query.
  if (f === q) {
    return { score: SCORE_EXACT_MATCH, matchType: 'exact' }
  }

  // Tier 2: the field begins with the query.
  if (f.startsWith(q)) {
    return { score: SCORE_PREFIX_MATCH, matchType: 'prefix' }
  }

  // Tier 3: the query lines up with the start of some word in the field.
  if (f.split(/[\s-_/]+/).some((word) => word.startsWith(q))) {
    return { score: SCORE_WORD_BOUNDARY, matchType: 'word-boundary' }
  }

  // Tier 4: the query occurs somewhere inside the field.
  if (f.includes(q)) {
    return { score: SCORE_SUBSTRING_MATCH, matchType: 'substring' }
  }

  return { score: 0, matchType: null }
}
/**
 * Check if query matches any alias in the item's aliases array
 * Returns the alias score if a match is found, 0 otherwise
 */
function calculateAliasScore(
  query: string,
  aliases?: string[]
): { score: number; matchType: 'alias' | null } {
  if (!aliases?.length) {
    return { score: 0, matchType: null }
  }

  const q = query.toLowerCase().trim()

  // First alias that matches at any strength wins.
  for (const alias of aliases) {
    const a = alias.toLowerCase().trim()
    // Strongest: alias is exactly the query.
    if (a === q) {
      return { score: SCORE_ALIAS_MATCH, matchType: 'alias' }
    }
    // Weaker: alias starts with the query.
    if (a.startsWith(q)) {
      return { score: SCORE_ALIAS_MATCH * 0.8, matchType: 'alias' }
    }
    // Weakest: one string contains the other.
    if (q.includes(a) || a.includes(q)) {
      return { score: SCORE_ALIAS_MATCH * 0.6, matchType: 'alias' }
    }
  }

  return { score: 0, matchType: null }
}
/**
* Calculate multi-word match score
* Each word in the query must appear somewhere in the field
* Returns a score based on how well the words match
*/
function calculateMultiWordScore(
queryWords: string[],
field: string
): { score: number; matchType: 'word-boundary' | 'substring' | null } {
const normalizedField = field.toLowerCase().trim()
const fieldWords = normalizedField.split(/[\s\-_/:]+/)
let allWordsMatch = true
let totalScore = 0
let hasWordBoundary = false
for (const queryWord of queryWords) {
const wordBoundaryMatch = fieldWords.some((fw) => fw.startsWith(queryWord))
const substringMatch = normalizedField.includes(queryWord)
if (wordBoundaryMatch) {
totalScore += SCORE_WORD_BOUNDARY
hasWordBoundary = true
} else if (substringMatch) {
totalScore += SCORE_SUBSTRING_MATCH
} else {
allWordsMatch = false
break
}
}
if (!allWordsMatch) {
return { score: 0, matchType: null }
}
return {
score: totalScore / queryWords.length,
matchType: hasWordBoundary ? 'word-boundary' : 'substring',
}
}
/**
 * Search items using tiered matching algorithm
 * Returns items sorted by relevance (highest score first)
 */
export function searchItems<T extends SearchableItem>(
  query: string,
  items: T[]
): SearchResult<T>[] {
  const trimmed = query.trim()
  if (!trimmed) {
    return []
  }

  const words = trimmed.toLowerCase().split(/\s+/).filter(Boolean)
  const multiWord = words.length > 1
  const hits: SearchResult<T>[] = []

  for (const item of items) {
    const nameField = calculateFieldScore(trimmed, item.name)
    const descField = item.description
      ? calculateFieldScore(trimmed, item.description)
      : { score: 0, matchType: null }
    const aliasField = calculateAliasScore(trimmed, item.aliases)

    let nameScore = nameField.score
    let descScore = descField.score * DESCRIPTION_WEIGHT
    const aliasScore = aliasField.score
    let nameMatchType = nameField.matchType

    // Multi-word queries get a second chance: match each word independently
    // and keep whichever strategy scored higher.
    if (multiWord) {
      const perWordName = calculateMultiWordScore(words, item.name)
      if (perWordName.score > nameScore) {
        nameScore = perWordName.score
        nameMatchType = perWordName.matchType
      }
      if (item.description) {
        const perWordDesc = calculateMultiWordScore(words, item.description).score * DESCRIPTION_WEIGHT
        if (perWordDesc > descScore) {
          descScore = perWordDesc
        }
      }
    }

    const best = Math.max(nameScore, descScore, aliasScore)
    if (best <= 0) {
      continue
    }

    // Attribute the winning score to a field; ties favour name, then alias.
    let matchType: SearchResult<T>['matchType']
    if (nameScore >= descScore && nameScore >= aliasScore) {
      matchType = nameMatchType || 'substring'
    } else if (aliasScore >= descScore) {
      matchType = 'alias'
    } else {
      matchType = 'description'
    }

    hits.push({ item, score: best, matchType })
  }

  // sort() is in-place and returns the same array, so this is safe on a local.
  return hits.sort((a, b) => b.score - a.score)
}
/**
 * Get a human-readable match type label
 */
export function getMatchTypeLabel(matchType: SearchResult<any>['matchType']): string {
  // Table-driven mapping; unknown values fall back to the generic label.
  const labels: Record<string, string> = {
    exact: 'Exact match',
    prefix: 'Starts with',
    alias: 'Similar to',
    'word-boundary': 'Word match',
    substring: 'Contains',
    description: 'In description',
  }
  return labels[matchType] ?? 'Match'
}

View File

@@ -176,7 +176,7 @@ function FormattedInput({
onChange,
onScroll,
}: FormattedInputProps) {
const handleScroll = (e: React.UIEvent<HTMLInputElement>) => {
const handleScroll = (e: { currentTarget: HTMLInputElement }) => {
onScroll(e.currentTarget.scrollLeft)
}

View File

@@ -73,7 +73,12 @@ export const Sidebar = memo(function Sidebar() {
const { data: sessionData, isPending: sessionLoading } = useSession()
const { canEdit } = useUserPermissionsContext()
const { config: permissionConfig } = usePermissionConfig()
const { config: permissionConfig, filterBlocks } = usePermissionConfig()
const initializeSearchData = useSearchModalStore((state) => state.initializeData)
useEffect(() => {
initializeSearchData(filterBlocks)
}, [initializeSearchData, filterBlocks])
/**
* Sidebar state from store with hydration tracking to prevent SSR mismatch.

File diff suppressed because one or more lines are too long

View File

@@ -28,6 +28,7 @@ import type {
} from '@/executor/types'
import { streamingResponseFormatProcessor } from '@/executor/utils'
import { buildBlockExecutionError, normalizeError } from '@/executor/utils/errors'
import { isJSONString } from '@/executor/utils/json'
import { filterOutputForLog } from '@/executor/utils/output-filter'
import { validateBlockType } from '@/executor/utils/permission-check'
import type { VariableResolver } from '@/executor/variables/resolver'
@@ -86,7 +87,7 @@ export class BlockExecutor {
resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)
if (blockLog) {
blockLog.input = resolvedInputs
blockLog.input = this.parseJsonInputs(resolvedInputs)
}
} catch (error) {
cleanupSelfReference?.()
@@ -157,7 +158,14 @@ export class BlockExecutor {
const displayOutput = filterOutputForLog(block.metadata?.id || '', normalizedOutput, {
block,
})
this.callOnBlockComplete(ctx, node, block, resolvedInputs, displayOutput, duration)
this.callOnBlockComplete(
ctx,
node,
block,
this.parseJsonInputs(resolvedInputs),
displayOutput,
duration
)
}
return normalizedOutput
@@ -233,7 +241,7 @@ export class BlockExecutor {
blockLog.durationMs = duration
blockLog.success = false
blockLog.error = errorMessage
blockLog.input = input
blockLog.input = this.parseJsonInputs(input)
blockLog.output = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
}
@@ -248,7 +256,14 @@ export class BlockExecutor {
if (!isSentinel) {
const displayOutput = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
this.callOnBlockComplete(ctx, node, block, input, displayOutput, duration)
this.callOnBlockComplete(
ctx,
node,
block,
this.parseJsonInputs(input),
displayOutput,
duration
)
}
const hasErrorPort = this.hasErrorPortEdge(node)
@@ -336,6 +351,36 @@ export class BlockExecutor {
return { result: output }
}
/**
* Parse JSON string inputs to objects for log display only.
* Attempts to parse any string that looks like JSON.
* Returns a new object - does not mutate the original inputs.
*/
private parseJsonInputs(inputs: Record<string, any>): Record<string, any> {
let result = inputs
let hasChanges = false
for (const [key, value] of Object.entries(inputs)) {
// isJSONString is a quick heuristic (checks for { or [), not a validator.
// Invalid JSON is safely caught below - this just avoids JSON.parse on every string.
if (typeof value !== 'string' || !isJSONString(value)) {
continue
}
try {
if (!hasChanges) {
result = { ...inputs }
hasChanges = true
}
result[key] = JSON.parse(value.trim())
} catch {
// Not valid JSON, keep original string
}
}
return result
}
private callOnBlockStart(ctx: ExecutionContext, node: DAGNode, block: SerializedBlock): void {
const blockId = node.id
const blockName = block.metadata?.name ?? blockId

View File

@@ -936,8 +936,12 @@ export class AgentBlockHandler implements BlockHandler {
systemPrompt: validMessages ? undefined : inputs.systemPrompt,
context: validMessages ? undefined : stringifyJSON(messages),
tools: formattedTools,
temperature: inputs.temperature,
maxTokens: inputs.maxTokens,
temperature:
inputs.temperature != null && inputs.temperature !== ''
? Number(inputs.temperature)
: undefined,
maxTokens:
inputs.maxTokens != null && inputs.maxTokens !== '' ? Number(inputs.maxTokens) : undefined,
apiKey: inputs.apiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,

View File

@@ -14,8 +14,8 @@ export interface AgentInputs {
slidingWindowSize?: string // For message-based sliding window
slidingWindowTokens?: string // For token-based sliding window
// LLM parameters
temperature?: number
maxTokens?: number
temperature?: string
maxTokens?: string
apiKey?: string
azureEndpoint?: string
azureApiVersion?: string

View File

@@ -34,6 +34,7 @@ export function layoutContainers(
: DEFAULT_CONTAINER_HORIZONTAL_SPACING,
verticalSpacing: options.verticalSpacing ?? DEFAULT_VERTICAL_SPACING,
padding: { x: CONTAINER_PADDING_X, y: CONTAINER_PADDING_Y },
gridSize: options.gridSize,
}
for (const [parentId, childIds] of children.entries()) {
@@ -56,18 +57,15 @@ export function layoutContainers(
continue
}
// Use the shared core layout function with container options
const { nodes, dimensions } = layoutBlocksCore(childBlocks, childEdges, {
isContainer: true,
layoutOptions: containerOptions,
})
// Apply positions back to blocks
for (const node of nodes.values()) {
blocks[node.id].position = node.position
}
// Update container dimensions
const calculatedWidth = dimensions.width
const calculatedHeight = dimensions.height

View File

@@ -9,6 +9,7 @@ import {
getBlockMetrics,
normalizePositions,
prepareBlockMetrics,
snapNodesToGrid,
} from '@/lib/workflows/autolayout/utils'
import { BLOCK_DIMENSIONS, HANDLE_POSITIONS } from '@/lib/workflows/blocks/block-dimensions'
import { EDGE } from '@/executor/constants'
@@ -84,7 +85,6 @@ export function assignLayers(
): Map<string, GraphNode> {
const nodes = new Map<string, GraphNode>()
// Initialize nodes
for (const [id, block] of Object.entries(blocks)) {
nodes.set(id, {
id,
@@ -97,7 +97,6 @@ export function assignLayers(
})
}
// Build a map of target node -> edges coming into it (to check sourceHandle later)
const incomingEdgesMap = new Map<string, Edge[]>()
for (const edge of edges) {
if (!incomingEdgesMap.has(edge.target)) {
@@ -106,7 +105,6 @@ export function assignLayers(
incomingEdgesMap.get(edge.target)!.push(edge)
}
// Build adjacency from edges
for (const edge of edges) {
const sourceNode = nodes.get(edge.source)
const targetNode = nodes.get(edge.target)
@@ -117,7 +115,6 @@ export function assignLayers(
}
}
// Find starter nodes (no incoming edges)
const starterNodes = Array.from(nodes.values()).filter((node) => node.incoming.size === 0)
if (starterNodes.length === 0 && nodes.size > 0) {
@@ -126,7 +123,6 @@ export function assignLayers(
logger.warn('No starter blocks found, using first block as starter', { blockId: firstNode.id })
}
// Topological sort using Kahn's algorithm
const inDegreeCount = new Map<string, number>()
for (const node of nodes.values()) {
@@ -144,8 +140,6 @@ export function assignLayers(
const node = nodes.get(nodeId)!
processed.add(nodeId)
// Calculate layer based on max incoming layer + 1
// For edges from subflow ends, add the subflow's internal depth (minus 1 to avoid double-counting)
if (node.incoming.size > 0) {
let maxEffectiveLayer = -1
const incomingEdges = incomingEdgesMap.get(nodeId) || []
@@ -153,16 +147,11 @@ export function assignLayers(
for (const incomingId of node.incoming) {
const incomingNode = nodes.get(incomingId)
if (incomingNode) {
// Find edges from this incoming node to check if it's a subflow end edge
const edgesFromSource = incomingEdges.filter((e) => e.source === incomingId)
let additionalDepth = 0
// Check if any edge from this source is a subflow end edge
const hasSubflowEndEdge = edgesFromSource.some(isSubflowEndEdge)
if (hasSubflowEndEdge && subflowDepths) {
// Get the internal depth of the subflow
// Subtract 1 because the +1 at the end of layer calculation already accounts for one layer
// E.g., if subflow has 2 internal layers (depth=2), we add 1 extra so total offset is 2
const depth = subflowDepths.get(incomingId) ?? 1
additionalDepth = Math.max(0, depth - 1)
}
@@ -174,7 +163,6 @@ export function assignLayers(
node.layer = maxEffectiveLayer + 1
}
// Add outgoing nodes when all dependencies processed
for (const targetId of node.outgoing) {
const currentCount = inDegreeCount.get(targetId) || 0
inDegreeCount.set(targetId, currentCount - 1)
@@ -185,7 +173,6 @@ export function assignLayers(
}
}
// Handle isolated nodes
for (const node of nodes.values()) {
if (!processed.has(node.id)) {
logger.debug('Isolated node detected, assigning to layer 0', { blockId: node.id })
@@ -224,7 +211,6 @@ function resolveVerticalOverlaps(nodes: GraphNode[], verticalSpacing: number): v
hasOverlap = false
iteration++
// Group nodes by layer for same-layer overlap resolution
const nodesByLayer = new Map<number, GraphNode[]>()
for (const node of nodes) {
if (!nodesByLayer.has(node.layer)) {
@@ -233,11 +219,9 @@ function resolveVerticalOverlaps(nodes: GraphNode[], verticalSpacing: number): v
nodesByLayer.get(node.layer)!.push(node)
}
// Process each layer independently
for (const [layer, layerNodes] of nodesByLayer) {
if (layerNodes.length < 2) continue
// Sort by Y position for consistent processing
layerNodes.sort((a, b) => a.position.y - b.position.y)
for (let i = 0; i < layerNodes.length - 1; i++) {
@@ -302,7 +286,6 @@ export function calculatePositions(
const layerNumbers = Array.from(layers.keys()).sort((a, b) => a - b)
// Calculate max width for each layer
const layerWidths = new Map<number, number>()
for (const layerNum of layerNumbers) {
const nodesInLayer = layers.get(layerNum)!
@@ -310,7 +293,6 @@ export function calculatePositions(
layerWidths.set(layerNum, maxWidth)
}
// Calculate cumulative X positions for each layer based on actual widths
const layerXPositions = new Map<number, number>()
let cumulativeX = padding.x
@@ -319,7 +301,6 @@ export function calculatePositions(
cumulativeX += layerWidths.get(layerNum)! + horizontalSpacing
}
// Build a flat map of all nodes for quick lookups
const allNodes = new Map<string, GraphNode>()
for (const nodesInLayer of layers.values()) {
for (const node of nodesInLayer) {
@@ -327,7 +308,6 @@ export function calculatePositions(
}
}
// Build incoming edges map for handle lookups
const incomingEdgesMap = new Map<string, Edge[]>()
for (const edge of edges) {
if (!incomingEdgesMap.has(edge.target)) {
@@ -336,20 +316,16 @@ export function calculatePositions(
incomingEdgesMap.get(edge.target)!.push(edge)
}
// Position nodes layer by layer, aligning with connected predecessors
for (const layerNum of layerNumbers) {
const nodesInLayer = layers.get(layerNum)!
const xPosition = layerXPositions.get(layerNum)!
// Separate containers and non-containers
const containersInLayer = nodesInLayer.filter(isContainerBlock)
const nonContainersInLayer = nodesInLayer.filter((n) => !isContainerBlock(n))
// For the first layer (layer 0), position sequentially from padding.y
if (layerNum === 0) {
let yOffset = padding.y
// Sort containers by height for visual balance
containersInLayer.sort((a, b) => b.metrics.height - a.metrics.height)
for (const node of containersInLayer) {
@@ -361,7 +337,6 @@ export function calculatePositions(
yOffset += CONTAINER_VERTICAL_CLEARANCE
}
// Sort non-containers by outgoing connections
nonContainersInLayer.sort((a, b) => b.outgoing.size - a.outgoing.size)
for (const node of nonContainersInLayer) {
@@ -371,9 +346,7 @@ export function calculatePositions(
continue
}
// For subsequent layers, align with connected predecessors (handle-to-handle)
for (const node of [...containersInLayer, ...nonContainersInLayer]) {
// Find the bottommost predecessor handle Y (highest value) and align to it
let bestSourceHandleY = -1
let bestEdge: Edge | null = null
const incomingEdges = incomingEdgesMap.get(node.id) || []
@@ -381,7 +354,6 @@ export function calculatePositions(
for (const edge of incomingEdges) {
const predecessor = allNodes.get(edge.source)
if (predecessor) {
// Calculate actual source handle Y position based on block type and handle
const sourceHandleOffset = getSourceHandleYOffset(predecessor.block, edge.sourceHandle)
const sourceHandleY = predecessor.position.y + sourceHandleOffset
@@ -392,20 +364,16 @@ export function calculatePositions(
}
}
// If no predecessors found (shouldn't happen for layer > 0), use padding
if (bestSourceHandleY < 0) {
bestSourceHandleY = padding.y + HANDLE_POSITIONS.DEFAULT_Y_OFFSET
}
// Calculate the target handle Y offset for this node
const targetHandleOffset = getTargetHandleYOffset(node.block, bestEdge?.targetHandle)
// Position node so its target handle aligns with the source handle Y
node.position = { x: xPosition, y: bestSourceHandleY - targetHandleOffset }
}
}
// Resolve vertical overlaps within layers (X overlaps prevented by cumulative positioning)
resolveVerticalOverlaps(Array.from(layers.values()).flat(), verticalSpacing)
}
@@ -435,7 +403,7 @@ export function layoutBlocksCore(
return { nodes: new Map(), dimensions: { width: 0, height: 0 } }
}
const layoutOptions =
const layoutOptions: LayoutOptions =
options.layoutOptions ??
(options.isContainer ? CONTAINER_LAYOUT_OPTIONS : DEFAULT_LAYOUT_OPTIONS)
@@ -452,7 +420,13 @@ export function layoutBlocksCore(
calculatePositions(layers, edges, layoutOptions)
// 5. Normalize positions
const dimensions = normalizePositions(nodes, { isContainer: options.isContainer })
let dimensions = normalizePositions(nodes, { isContainer: options.isContainer })
// 6. Snap to grid if gridSize is specified (recalculates dimensions)
const snappedDimensions = snapNodesToGrid(nodes, layoutOptions.gridSize)
if (snappedDimensions) {
dimensions = snappedDimensions
}
return { nodes, dimensions }
}

View File

@@ -36,14 +36,13 @@ export function applyAutoLayout(
const horizontalSpacing = options.horizontalSpacing ?? DEFAULT_HORIZONTAL_SPACING
const verticalSpacing = options.verticalSpacing ?? DEFAULT_VERTICAL_SPACING
// Pre-calculate container dimensions by laying out their children (bottom-up)
// This ensures accurate widths/heights before root-level layout
prepareContainerDimensions(
blocksCopy,
edges,
layoutBlocksCore,
horizontalSpacing,
verticalSpacing
verticalSpacing,
options.gridSize
)
const { root: rootBlockIds } = getBlocksByParent(blocksCopy)
@@ -58,8 +57,6 @@ export function applyAutoLayout(
(edge) => layoutRootIds.includes(edge.source) && layoutRootIds.includes(edge.target)
)
// Calculate subflow depths before laying out root blocks
// This ensures blocks connected to subflow ends are positioned correctly
const subflowDepths = calculateSubflowDepths(blocksCopy, edges, assignLayers)
if (Object.keys(rootBlocks).length > 0) {
@@ -95,13 +92,12 @@ export function applyAutoLayout(
}
export type { TargetedLayoutOptions } from '@/lib/workflows/autolayout/targeted'
// Function exports
export { applyTargetedLayout } from '@/lib/workflows/autolayout/targeted'
// Type exports
export type { Edge, LayoutOptions, LayoutResult } from '@/lib/workflows/autolayout/types'
export {
getBlockMetrics,
isContainerType,
shouldSkipAutoLayout,
snapPositionToGrid,
transferBlockHeights,
} from '@/lib/workflows/autolayout/utils'

View File

@@ -1,4 +1,3 @@
import { createLogger } from '@sim/logger'
import {
CONTAINER_PADDING,
DEFAULT_HORIZONTAL_SPACING,
@@ -14,12 +13,11 @@ import {
isContainerType,
prepareContainerDimensions,
shouldSkipAutoLayout,
snapPositionToGrid,
} from '@/lib/workflows/autolayout/utils'
import { CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
import type { BlockState } from '@/stores/workflows/workflow/types'
const logger = createLogger('AutoLayout:Targeted')
export interface TargetedLayoutOptions extends LayoutOptions {
changedBlockIds: string[]
verticalSpacing?: number
@@ -39,6 +37,7 @@ export function applyTargetedLayout(
changedBlockIds,
verticalSpacing = DEFAULT_VERTICAL_SPACING,
horizontalSpacing = DEFAULT_HORIZONTAL_SPACING,
gridSize,
} = options
if (!changedBlockIds || changedBlockIds.length === 0) {
@@ -48,19 +47,17 @@ export function applyTargetedLayout(
const changedSet = new Set(changedBlockIds)
const blocksCopy: Record<string, BlockState> = JSON.parse(JSON.stringify(blocks))
// Pre-calculate container dimensions by laying out their children (bottom-up)
// This ensures accurate widths/heights before root-level layout
prepareContainerDimensions(
blocksCopy,
edges,
layoutBlocksCore,
horizontalSpacing,
verticalSpacing
verticalSpacing,
gridSize
)
const groups = getBlocksByParent(blocksCopy)
// Calculate subflow depths before layout to properly position blocks after subflow ends
const subflowDepths = calculateSubflowDepths(blocksCopy, edges, assignLayers)
layoutGroup(
@@ -71,7 +68,8 @@ export function applyTargetedLayout(
changedSet,
verticalSpacing,
horizontalSpacing,
subflowDepths
subflowDepths,
gridSize
)
for (const [parentId, childIds] of groups.children.entries()) {
@@ -83,7 +81,8 @@ export function applyTargetedLayout(
changedSet,
verticalSpacing,
horizontalSpacing,
subflowDepths
subflowDepths,
gridSize
)
}
@@ -101,7 +100,8 @@ function layoutGroup(
changedSet: Set<string>,
verticalSpacing: number,
horizontalSpacing: number,
subflowDepths: Map<string, number>
subflowDepths: Map<string, number>,
gridSize?: number
): void {
if (childIds.length === 0) return
@@ -116,7 +116,6 @@ function layoutGroup(
return
}
// Determine which blocks need repositioning
const requestedLayout = layoutEligibleChildIds.filter((id) => {
const block = blocks[id]
if (!block) return false
@@ -141,7 +140,6 @@ function layoutGroup(
return
}
// Store old positions for anchor calculation
const oldPositions = new Map<string, { x: number; y: number }>()
for (const id of layoutEligibleChildIds) {
const block = blocks[id]
@@ -149,8 +147,6 @@ function layoutGroup(
oldPositions.set(id, { ...block.position })
}
// Compute layout positions using core function
// Only pass subflowDepths for root-level layout (not inside containers)
const layoutPositions = computeLayoutPositions(
layoutEligibleChildIds,
blocks,
@@ -158,7 +154,8 @@ function layoutGroup(
parentBlock,
horizontalSpacing,
verticalSpacing,
parentId === null ? subflowDepths : undefined
parentId === null ? subflowDepths : undefined,
gridSize
)
if (layoutPositions.size === 0) {
@@ -168,7 +165,6 @@ function layoutGroup(
return
}
// Find anchor block (unchanged block with a layout position)
let offsetX = 0
let offsetY = 0
@@ -185,20 +181,16 @@ function layoutGroup(
}
}
// Apply new positions only to blocks that need layout
for (const id of needsLayout) {
const block = blocks[id]
const newPos = layoutPositions.get(id)
if (!block || !newPos) continue
block.position = {
x: newPos.x + offsetX,
y: newPos.y + offsetY,
}
block.position = snapPositionToGrid({ x: newPos.x + offsetX, y: newPos.y + offsetY }, gridSize)
}
}
/**
* Computes layout positions for a subset of blocks using the core layout
* Computes layout positions for a subset of blocks using the core layout function
*/
function computeLayoutPositions(
childIds: string[],
@@ -207,7 +199,8 @@ function computeLayoutPositions(
parentBlock: BlockState | undefined,
horizontalSpacing: number,
verticalSpacing: number,
subflowDepths?: Map<string, number>
subflowDepths?: Map<string, number>,
gridSize?: number
): Map<string, { x: number; y: number }> {
const subsetBlocks: Record<string, BlockState> = {}
for (const id of childIds) {
@@ -228,11 +221,11 @@ function computeLayoutPositions(
layoutOptions: {
horizontalSpacing: isContainer ? horizontalSpacing * 0.85 : horizontalSpacing,
verticalSpacing,
gridSize,
},
subflowDepths,
})
// Update parent container dimensions if applicable
if (parentBlock) {
parentBlock.data = {
...parentBlock.data,
@@ -241,7 +234,6 @@ function computeLayoutPositions(
}
}
// Convert nodes to position map
const positions = new Map<string, { x: number; y: number }>()
for (const node of nodes.values()) {
positions.set(node.id, { x: node.position.x, y: node.position.y })

View File

@@ -7,6 +7,7 @@ export interface LayoutOptions {
horizontalSpacing?: number
verticalSpacing?: number
padding?: { x: number; y: number }
gridSize?: number
}
export interface LayoutResult {

View File

@@ -18,6 +18,61 @@ function resolveNumeric(value: number | undefined, fallback: number): number {
return typeof value === 'number' && Number.isFinite(value) ? value : fallback
}
/**
 * Rounds a single coordinate to the nearest multiple of the grid size.
 */
function snapToGrid(value: number, gridSize: number): number {
  const nearestCell = Math.round(value / gridSize)
  return nearestCell * gridSize
}

/**
 * Aligns a position to the closest grid point.
 * The position passes through untouched when gridSize is absent or non-positive.
 */
export function snapPositionToGrid(
  position: { x: number; y: number },
  gridSize: number | undefined
): { x: number; y: number } {
  if (!gridSize || gridSize <= 0) {
    return position
  }
  const snappedX = snapToGrid(position.x, gridSize)
  const snappedY = snapToGrid(position.y, gridSize)
  return { x: snappedX, y: snappedY }
}
/**
 * Snaps every node position in the graph onto the grid (mutating each
 * node's position in place) and recomputes the bounding dimensions of the
 * snapped layout, padded by CONTAINER_PADDING on each side.
 * Returns null when gridSize is unset/non-positive or the graph is empty.
 */
export function snapNodesToGrid(
  nodes: Map<string, GraphNode>,
  gridSize: number | undefined
): { width: number; height: number } | null {
  if (!gridSize || gridSize <= 0 || nodes.size === 0) {
    return null
  }

  const bounds = {
    minX: Number.POSITIVE_INFINITY,
    minY: Number.POSITIVE_INFINITY,
    maxX: Number.NEGATIVE_INFINITY,
    maxY: Number.NEGATIVE_INFINITY,
  }

  for (const node of nodes.values()) {
    node.position = snapPositionToGrid(node.position, gridSize)
    const { x, y } = node.position
    bounds.minX = Math.min(bounds.minX, x)
    bounds.minY = Math.min(bounds.minY, y)
    bounds.maxX = Math.max(bounds.maxX, x + node.metrics.width)
    bounds.maxY = Math.max(bounds.maxY, y + node.metrics.height)
  }

  return {
    width: bounds.maxX - bounds.minX + CONTAINER_PADDING * 2,
    height: bounds.maxY - bounds.minY + CONTAINER_PADDING * 2,
  }
}
/**
* Checks if a block type is a container (loop or parallel)
*/
@@ -314,6 +369,7 @@ export type LayoutFunction = (
horizontalSpacing?: number
verticalSpacing?: number
padding?: { x: number; y: number }
gridSize?: number
}
subflowDepths?: Map<string, number>
}
@@ -329,13 +385,15 @@ export type LayoutFunction = (
* @param layoutFn - The layout function to use for calculating dimensions
* @param horizontalSpacing - Horizontal spacing between blocks
* @param verticalSpacing - Vertical spacing between blocks
* @param gridSize - Optional grid size for snap-to-grid
*/
export function prepareContainerDimensions(
blocks: Record<string, BlockState>,
edges: Edge[],
layoutFn: LayoutFunction,
horizontalSpacing: number,
verticalSpacing: number
verticalSpacing: number,
gridSize?: number
): void {
const { children } = getBlocksByParent(blocks)
@@ -402,6 +460,7 @@ export function prepareContainerDimensions(
layoutOptions: {
horizontalSpacing: horizontalSpacing * 0.85,
verticalSpacing,
gridSize,
},
})

View File

@@ -102,7 +102,7 @@ export const azureOpenAIProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity

View File

@@ -77,7 +77,7 @@ export const cerebrasProvider: ProviderConfig = {
messages: allMessages,
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {
type: 'json_schema',

View File

@@ -81,7 +81,7 @@ export const deepseekProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null

View File

@@ -349,7 +349,7 @@ export async function executeGeminiRequest(
if (request.temperature !== undefined) {
geminiConfig.temperature = request.temperature
}
if (request.maxTokens !== undefined) {
if (request.maxTokens != null) {
geminiConfig.maxOutputTokens = request.maxTokens
}
if (systemInstruction) {

View File

@@ -123,17 +123,21 @@ export function extractFunctionCallPart(candidate: Candidate | undefined): Part
}
/**
 * Converts usage metadata from SDK response to our format.
 * Per Gemini docs, total = promptTokenCount + candidatesTokenCount + toolUsePromptTokenCount + thoughtsTokenCount.
 * We include toolUsePromptTokenCount in input and thoughtsTokenCount in output for correct billing.
 */
export function convertUsageMetadata(
  usageMetadata: GenerateContentResponseUsageMetadata | undefined
): GeminiUsage {
  const thoughtsTokenCount = usageMetadata?.thoughtsTokenCount ?? 0
  const toolUsePromptTokenCount = usageMetadata?.toolUsePromptTokenCount ?? 0
  // Fold tool-use prompt tokens into input and reasoning tokens into output.
  const promptTokenCount = (usageMetadata?.promptTokenCount ?? 0) + toolUsePromptTokenCount
  const candidatesTokenCount = (usageMetadata?.candidatesTokenCount ?? 0) + thoughtsTokenCount
  return {
    promptTokenCount,
    candidatesTokenCount,
    // When the SDK omits totalTokenCount, fall back to the sum of the
    // adjusted counts (not 0) so the total stays consistent with the
    // reported input/output tokens above.
    totalTokenCount: usageMetadata?.totalTokenCount ?? promptTokenCount + candidatesTokenCount,
  }
}

View File

@@ -74,7 +74,7 @@ export const groqProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -91,7 +91,7 @@ export const mistralProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -1130,7 +1130,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'cerebras',
name: 'Cerebras',
description: 'Cerebras Cloud LLMs',
defaultModel: 'cerebras/llama-3.3-70b',
defaultModel: 'cerebras/gpt-oss-120b',
modelPatterns: [/^cerebras/],
icon: CerebrasIcon,
capabilities: {
@@ -1138,44 +1138,64 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
models: [
{
id: 'cerebras/llama-3.1-8b',
id: 'cerebras/gpt-oss-120b',
pricing: {
input: 0.35,
output: 0.75,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
{
id: 'cerebras/llama3.1-8b',
pricing: {
input: 0.1,
output: 0.1,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 32000,
},
{
id: 'cerebras/llama-3.1-70b',
pricing: {
input: 0.6,
output: 0.6,
updatedAt: '2025-10-11',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'cerebras/llama-3.3-70b',
pricing: {
input: 0.6,
output: 0.6,
updatedAt: '2025-10-11',
input: 0.85,
output: 1.2,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'cerebras/llama-4-scout-17b-16e-instruct',
id: 'cerebras/qwen-3-32b',
pricing: {
input: 0.11,
output: 0.34,
updatedAt: '2025-10-11',
input: 0.4,
output: 0.8,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 10000000,
contextWindow: 131000,
},
{
id: 'cerebras/qwen-3-235b-a22b-instruct-2507',
pricing: {
input: 0.6,
output: 1.2,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
{
id: 'cerebras/zai-glm-4.7',
pricing: {
input: 2.25,
output: 2.75,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
],
},
@@ -1194,8 +1214,8 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'groq/openai/gpt-oss-120b',
pricing: {
input: 0.15,
output: 0.75,
updatedAt: '2025-10-11',
output: 0.6,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1203,9 +1223,29 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
{
id: 'groq/openai/gpt-oss-20b',
pricing: {
input: 0.01,
output: 0.25,
updatedAt: '2025-10-11',
input: 0.075,
output: 0.3,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/openai/gpt-oss-safeguard-20b',
pricing: {
input: 0.075,
output: 0.3,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/qwen/qwen3-32b',
pricing: {
input: 0.29,
output: 0.59,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1215,7 +1255,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.05,
output: 0.08,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1225,27 +1265,17 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.59,
output: 0.79,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/llama-4-scout-17b-instruct',
id: 'groq/meta-llama/llama-4-scout-17b-16e-instruct',
pricing: {
input: 0.11,
output: 0.34,
updatedAt: '2025-10-11',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/llama-4-maverick-17b-instruct',
pricing: {
input: 0.5,
output: 0.77,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1253,9 +1283,9 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
{
id: 'groq/meta-llama/llama-4-maverick-17b-128e-instruct',
pricing: {
input: 0.5,
output: 0.77,
updatedAt: '2025-10-11',
input: 0.2,
output: 0.6,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1265,7 +1295,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.04,
output: 0.04,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 8192,
@@ -1275,27 +1305,37 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.59,
output: 0.79,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'groq/moonshotai/kimi-k2-instruct',
id: 'groq/deepseek-r1-distill-qwen-32b',
pricing: {
input: 0.69,
output: 0.69,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'groq/moonshotai/kimi-k2-instruct-0905',
pricing: {
input: 1.0,
output: 3.0,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
contextWindow: 262144,
},
{
id: 'groq/meta-llama/llama-guard-4-12b',
pricing: {
input: 0.2,
output: 0.2,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,

View File

@@ -105,7 +105,7 @@ export const ollamaProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -81,7 +81,7 @@ export const openaiProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity

View File

@@ -121,7 +121,7 @@ export const openRouterProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
let hasActiveTools = false

View File

@@ -135,7 +135,7 @@ export const vllmProvider: ProviderConfig = {
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {

View File

@@ -92,7 +92,7 @@ export const xAIProvider: ProviderConfig = {
}
if (request.temperature !== undefined) basePayload.temperature = request.temperature
if (request.maxTokens !== undefined) basePayload.max_tokens = request.maxTokens
if (request.maxTokens != null) basePayload.max_completion_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
if (tools?.length) {

View File

@@ -1,15 +1,155 @@
import { RepeatIcon, SplitIcon } from 'lucide-react'
import { create } from 'zustand'
import type { SearchModalState } from './types'
import { devtools } from 'zustand/middleware'
import { getToolOperationsIndex } from '@/lib/search/tool-operations'
import { getTriggersForSidebar } from '@/lib/workflows/triggers/trigger-utils'
import { getAllBlocks } from '@/blocks'
import type {
SearchBlockItem,
SearchData,
SearchDocItem,
SearchModalState,
SearchToolOperationItem,
} from './types'
export const useSearchModalStore = create<SearchModalState>((set) => ({
isOpen: false,
setOpen: (open: boolean) => {
set({ isOpen: open })
},
open: () => {
set({ isOpen: true })
},
close: () => {
set({ isOpen: false })
},
}))
// Empty search index used until initializeData() populates it.
const initialData: SearchData = {
  blocks: [],
  tools: [],
  triggers: [],
  toolOperations: [],
  docs: [],
  isInitialized: false,
}

// Global zustand store backing the universal search (cmdk) modal.
// Wrapped in devtools middleware so state transitions are inspectable
// under the 'search-modal-store' name.
export const useSearchModalStore = create<SearchModalState>()(
  devtools(
    // NOTE(review): 'get' is currently unused in this creator.
    (set, get) => ({
      isOpen: false,
      data: initialData,
      setOpen: (open: boolean) => {
        set({ isOpen: open })
      },
      open: () => {
        set({ isOpen: true })
      },
      close: () => {
        set({ isOpen: false })
      },
      // Builds the searchable indexes (blocks, tools, triggers, tool
      // operations, docs) from the block registry. 'filterBlocks' lets the
      // caller drop entries before they are indexed.
      initializeData: (filterBlocks) => {
        const allBlocks = getAllBlocks()
        const filteredAllBlocks = filterBlocks(allBlocks) as typeof allBlocks

        const regularBlocks: SearchBlockItem[] = []
        const tools: SearchBlockItem[] = []
        const docs: SearchDocItem[] = []

        // Partition registry entries into blocks vs tools, and collect a
        // docs entry for every block that links to documentation.
        for (const block of filteredAllBlocks) {
          if (block.hideFromToolbar) continue

          const searchItem: SearchBlockItem = {
            id: block.type,
            name: block.name,
            description: block.description || '',
            icon: block.icon,
            bgColor: block.bgColor || '#6B7280',
            type: block.type,
          }

          if (block.category === 'blocks' && block.type !== 'starter') {
            regularBlocks.push(searchItem)
          } else if (block.category === 'tools') {
            tools.push(searchItem)
          }

          if (block.docsLink) {
            docs.push({
              id: `docs-${block.type}`,
              name: block.name,
              icon: block.icon,
              href: block.docsLink,
            })
          }
        }

        // Loop/Parallel are synthetic container blocks that do not come
        // from getAllBlocks(); they still go through the same filter.
        const specialBlocks: SearchBlockItem[] = [
          {
            id: 'loop',
            name: 'Loop',
            description: 'Create a Loop',
            icon: RepeatIcon,
            bgColor: '#2FB3FF',
            type: 'loop',
          },
          {
            id: 'parallel',
            name: 'Parallel',
            description: 'Parallel Execution',
            icon: SplitIcon,
            bgColor: '#FEE12B',
            type: 'parallel',
          },
        ]
        const blocks = [...regularBlocks, ...(filterBlocks(specialBlocks) as SearchBlockItem[])]

        const allTriggers = getTriggersForSidebar()
        const filteredTriggers = filterBlocks(allTriggers) as typeof allTriggers
        // Pin the core triggers to the top in this order; everything else
        // is sorted alphabetically after them.
        const priorityOrder = ['Start', 'Schedule', 'Webhook']
        const sortedTriggers = [...filteredTriggers].sort((a, b) => {
          const aIndex = priorityOrder.indexOf(a.name)
          const bIndex = priorityOrder.indexOf(b.name)
          const aHasPriority = aIndex !== -1
          const bHasPriority = bIndex !== -1
          if (aHasPriority && bHasPriority) return aIndex - bIndex
          if (aHasPriority) return -1
          if (bHasPriority) return 1
          return a.name.localeCompare(b.name)
        })
        const triggers = sortedTriggers.map(
          (block): SearchBlockItem => ({
            id: block.type,
            name: block.name,
            description: block.description || '',
            icon: block.icon,
            bgColor: block.bgColor || '#6B7280',
            type: block.type,
            // Full block config is attached only for trigger items.
            config: block,
          })
        )

        // Only index operations whose parent tool survived filtering.
        const allowedBlockTypes = new Set(tools.map((t) => t.type))
        const toolOperations: SearchToolOperationItem[] = getToolOperationsIndex()
          .filter((op) => allowedBlockTypes.has(op.blockType))
          .map((op) => ({
            id: op.id,
            name: `${op.serviceName}: ${op.operationName}`,
            searchValue: `${op.serviceName} ${op.operationName}`,
            icon: op.icon,
            bgColor: op.bgColor,
            blockType: op.blockType,
            operationId: op.operationId,
            keywords: op.aliases,
          }))

        set({
          data: {
            blocks,
            tools,
            triggers,
            toolOperations,
            docs,
            isInitialized: true,
          },
        })
      },
    }),
    { name: 'search-modal-store' }
  )
)

View File

@@ -1,3 +1,55 @@
import type { ComponentType } from 'react'
import type { BlockConfig } from '@/blocks/types'
/**
 * Represents a block item in the search results.
 */
export interface SearchBlockItem {
  // Unique key within the list; mirrors the block's type
  id: string
  name: string
  description: string
  icon: ComponentType<{ className?: string }>
  // Background color for the item's icon (hex string, e.g. '#6B7280')
  bgColor: string
  // Block type identifier used when inserting the block into a workflow
  type: string
  // Full block config; populated for trigger items — TODO confirm other producers
  config?: BlockConfig
}

/**
 * Represents a tool operation item in the search results.
 */
export interface SearchToolOperationItem {
  id: string
  // Display label, formatted as "Service: Operation"
  name: string
  // Text used for search matching ("Service Operation", without the colon)
  searchValue: string
  icon: ComponentType<{ className?: string }>
  bgColor: string
  // Type of the tool block this operation belongs to
  blockType: string
  operationId: string
  // Alias terms (the operation's aliases) that should also match in search
  keywords: string[]
}

/**
 * Represents a doc item in the search results.
 */
export interface SearchDocItem {
  id: string
  name: string
  icon: ComponentType<{ className?: string }>
  // Documentation URL (the block's docsLink)
  href: string
}

/**
 * Pre-computed search data that is initialized on app load.
 */
export interface SearchData {
  blocks: SearchBlockItem[]
  tools: SearchBlockItem[]
  triggers: SearchBlockItem[]
  toolOperations: SearchToolOperationItem[]
  docs: SearchDocItem[]
  // True once initializeData() has populated the fields above
  isInitialized: boolean
}
/**
* Global state for the universal search modal.
*
@@ -8,18 +60,27 @@
export interface SearchModalState {
  /** Whether the search modal is currently open. */
  isOpen: boolean
  /** Pre-computed search data. */
  data: SearchData
  /**
   * Explicitly set the open state of the modal.
   *
   * @param open - New open state.
   */
  setOpen: (open: boolean) => void
  /**
   * Convenience method to open the modal.
   */
  open: () => void
  /**
   * Convenience method to close the modal.
   */
  close: () => void
  /**
   * Initialize search data. Called once on app load.
   *
   * @param filterBlocks - Filter applied to blocks/triggers before indexing
   *   (presumably permission-based — see callers). May be invoked again if
   *   the filter's results change.
   */
  initializeData: (filterBlocks: <T extends { type: string }>(blocks: T[]) => T[]) => void
}

View File

@@ -0,0 +1,84 @@
{{- if .Values.certManager.enabled }}
{{- /*
cert-manager Issuer Bootstrap Pattern

PREREQUISITE: cert-manager must be installed in your cluster before enabling this.
The root CA Certificate is created in the namespace specified by certManager.rootCA.namespace
(defaults to "cert-manager"). Ensure this namespace exists and cert-manager is running there.
Install cert-manager: https://cert-manager.io/docs/installation/

This implements the recommended pattern from cert-manager documentation:
1. A self-signed ClusterIssuer (for bootstrapping the root CA only)
2. A root CA Certificate (self-signed, used to sign other certificates)
3. A CA ClusterIssuer (uses the root CA to sign certificates)

Reference: https://cert-manager.io/docs/configuration/selfsigned/
*/ -}}
---
# 1. Self-Signed ClusterIssuer (Bootstrap Only)
# This issuer is used ONLY to create the root CA certificate.
# It should NOT be used directly for application certificates.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ .Values.certManager.selfSignedIssuer.name }}
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: cert-manager
spec:
  selfSigned: {}
---
# 2. Root CA Certificate
# This certificate is signed by the self-signed issuer and becomes the root of trust.
# The secret created here will be used by the CA issuer to sign certificates.
# NOTE: This must be created in the cert-manager namespace (or the namespace specified
# in certManager.rootCA.namespace). Ensure cert-manager is installed there first.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ .Values.certManager.rootCA.certificateName }}
  namespace: {{ .Values.certManager.rootCA.namespace | default "cert-manager" }} # Must match cert-manager's cluster-resource-namespace
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: cert-manager
spec:
  isCA: true
  commonName: {{ .Values.certManager.rootCA.commonName }}
  secretName: {{ .Values.certManager.rootCA.secretName }}
  duration: {{ .Values.certManager.rootCA.duration | default "87600h" }} # 87600h = 10 years
  renewBefore: {{ .Values.certManager.rootCA.renewBefore | default "2160h" }} # 2160h = renew 90 days before expiry
  privateKey:
    algorithm: {{ .Values.certManager.rootCA.privateKey.algorithm | default "RSA" }}
    size: {{ .Values.certManager.rootCA.privateKey.size | default 4096 }}
  subject:
    organizations:
      {{- if .Values.certManager.rootCA.subject.organizations }}
      {{- toYaml .Values.certManager.rootCA.subject.organizations | nindent 6 }}
      {{- else }}
      - {{ .Release.Name }} # Default organization: the Helm release name
      {{- end }}
  issuerRef:
    name: {{ .Values.certManager.selfSignedIssuer.name }}
    kind: ClusterIssuer
    group: cert-manager.io
---
# 3. CA ClusterIssuer
# This is the issuer that should be used by applications to obtain certificates.
# It signs certificates using the root CA created above.
# NOTE: This issuer may briefly show "not ready" on first install while cert-manager
# processes the Certificate above and creates the secret. It will auto-reconcile.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ .Values.certManager.caIssuer.name }}
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: cert-manager
spec:
  ca:
    secretName: {{ .Values.certManager.rootCA.secretName }}
{{- end }}

View File

@@ -1,6 +1,36 @@
{{- if and .Values.ollama.enabled .Values.ollama.gpu.enabled }}
---
# NVIDIA Device Plugin DaemonSet for GPU support
# 1. ConfigMap for NVIDIA Device Plugin Configuration
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "sim.fullname" . }}-nvidia-device-plugin-config
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: nvidia-device-plugin
data:
config.yaml: |
version: v1
flags:
{{- if eq .Values.ollama.gpu.strategy "mig" }}
migStrategy: "single"
{{- else }}
migStrategy: "none"
{{- end }}
failOnInitError: false
plugin:
passDeviceSpecs: true
deviceListStrategy: envvar
{{- if eq .Values.ollama.gpu.strategy "time-slicing" }}
sharing:
timeSlicing:
resources:
- name: nvidia.com/gpu
replicas: {{ .Values.ollama.gpu.timeSlicingReplicas | default 5 }}
{{- end }}
---
# 2. NVIDIA Device Plugin DaemonSet for GPU support
apiVersion: apps/v1
kind: DaemonSet
metadata:
@@ -35,9 +65,6 @@ spec:
# Only schedule on nodes with NVIDIA GPUs
accelerator: nvidia
priorityClassName: system-node-critical
runtimeClassName: nvidia
hostNetwork: true
hostPID: true
volumes:
- name: device-plugin
hostPath:
@@ -48,22 +75,21 @@ spec:
- name: sys
hostPath:
path: /sys
- name: proc-driver-nvidia
hostPath:
path: /proc/driver/nvidia
# Volume to mount the ConfigMap
- name: nvidia-device-plugin-config
configMap:
name: {{ include "sim.fullname" . }}-nvidia-device-plugin-config
containers:
- name: nvidia-device-plugin
image: nvcr.io/nvidia/k8s-device-plugin:v0.14.5
image: nvcr.io/nvidia/k8s-device-plugin:v0.18.2
imagePullPolicy: Always
args:
- --mig-strategy=single
- --pass-device-specs=true
- --fail-on-init-error=false
- --device-list-strategy=envvar
- --nvidia-driver-root=/host-sys/fs/cgroup
- "--config-file=/etc/device-plugin/config.yaml"
{{- if eq .Values.ollama.gpu.strategy "mig" }}
env:
- name: NVIDIA_MIG_MONITOR_DEVICES
value: all
{{- end }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -74,29 +100,16 @@ spec:
- name: dev
mountPath: /dev
- name: sys
mountPath: /host-sys
mountPath: /sys
readOnly: true
- name: proc-driver-nvidia
mountPath: /proc/driver/nvidia
- name: nvidia-device-plugin-config
mountPath: /etc/device-plugin/
readOnly: true
resources:
requests:
cpu: 50m
memory: 10Mi
memory: 20Mi
limits:
cpu: 50m
memory: 20Mi
{{- if .Values.nodeSelector }}
nodeSelector:
{{- toYaml .Values.nodeSelector | nindent 8 }}
{{- end }}
---
# RuntimeClass for NVIDIA Container Runtime
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
name: {{ include "sim.fullname" . }}-nvidia
labels:
{{- include "sim.labels" . | nindent 4 }}
handler: nvidia
{{- end }}
memory: 50Mi
{{- end }}

View File

@@ -400,8 +400,10 @@ postgresql:
algorithm: RSA # RSA or ECDSA
size: 4096 # Key size in bits
# Issuer reference (REQUIRED if tls.enabled is true)
# By default, references the CA issuer created by certManager.caIssuer
# Make sure certManager.enabled is true, or provide your own issuer
issuerRef:
name: selfsigned-cluster-issuer # Name of your cert-manager Issuer/ClusterIssuer
name: sim-ca-issuer # Name of your cert-manager Issuer/ClusterIssuer
kind: ClusterIssuer # ClusterIssuer or Issuer
group: "" # Optional: cert-manager.io (leave empty for default)
# Additional DNS names (optional)
@@ -463,20 +465,26 @@ externalDatabase:
ollama:
# Enable/disable Ollama deployment
enabled: false
# Image configuration
image:
repository: ollama/ollama
tag: latest
pullPolicy: Always
# Number of replicas
replicaCount: 1
# GPU configuration
gpu:
enabled: false
count: 1
# GPU sharing strategy: "mig" (Multi-Instance GPU) or "time-slicing"
# - mig: Hardware-level GPU partitioning (requires supported GPUs like A100)
# - time-slicing: Software-level GPU sharing (works with most NVIDIA GPUs)
strategy: "time-slicing"
# Number of time-slicing replicas (only used when strategy is "time-slicing")
timeSlicingReplicas: 5
# Node selector for GPU workloads (adjust labels based on your cluster configuration)
nodeSelector:
@@ -1185,4 +1193,53 @@ externalSecrets:
# External database password (when using managed database services)
externalDatabase:
# Path to external database password in external store
password: ""
password: ""
# cert-manager configuration
# Prerequisites: Install cert-manager in your cluster first
# See: https://cert-manager.io/docs/installation/
#
# This implements the recommended CA bootstrap pattern from cert-manager:
# 1. Self-signed ClusterIssuer (bootstrap only - creates root CA)
# 2. Root CA Certificate (self-signed, becomes the trust anchor)
# 3. CA ClusterIssuer (signs application certificates using root CA)
#
# Reference: https://cert-manager.io/docs/configuration/selfsigned/
certManager:
# Enable/disable cert-manager issuer resources
enabled: false
# Self-signed ClusterIssuer (used ONLY to bootstrap the root CA)
# Do not reference this issuer directly for application certificates
selfSignedIssuer:
name: "sim-selfsigned-bootstrap-issuer"
# Root CA Certificate configuration
# This certificate is signed by the self-signed issuer and used as the trust anchor
rootCA:
# Name of the Certificate resource
certificateName: "sim-root-ca"
# Namespace where the root CA certificate and secret will be created
# Must match cert-manager's cluster-resource-namespace (default: cert-manager)
namespace: "cert-manager"
# Common name for the root CA certificate
commonName: "sim-root-ca"
# Secret name where the root CA certificate and key will be stored
secretName: "sim-root-ca-secret"
# Certificate validity duration (default: 10 years)
duration: "87600h"
# Renew before expiry (default: 90 days)
renewBefore: "2160h"
# Private key configuration
privateKey:
algorithm: RSA
size: 4096
# Subject configuration
subject:
organizations: []
# If empty, defaults to the release name
# CA ClusterIssuer configuration
# This is the issuer that applications should reference for obtaining certificates
caIssuer:
name: "sim-ca-issuer"