Mirror of https://github.com/simstudioai/sim.git (synced 2026-01-28 08:18:09 -05:00)

Compare commits: feat/run-f...fix/keyboa

51 Commits
5680fa0dd8
11dc18a80d
0d0209a108
500dcd4734
8bdba373c6
c8ffda1616
b4a389a71f
65bc21608c
ef613ef035
20b76e67b3
7640fdf742
ab4e9dc72f
1c58c35bd8
d63a5cb504
8bd5d41723
c12931bc50
e9c4251c1c
cc2be33d6b
45371e521e
0ce0f98aa5
dff1c9d083
b09f683072
a8bb0db660
af82820a28
4372841797
5e8c843241
7bf3d73ee6
7ffc11a738
be578e2ed7
f415e5edc4
13a6e6c3fa
f5ab7f21ae
bfb6fffe38
4fbec0a43f
585f5e365b
3792bdd252
eb5d1f3e5b
54ab82c8dd
f895bf469b
dd3209af06
b6ba3b50a7
b304233062
57e4b49bd6
e12dd204ed
3d9d9cbc54
0f4ec962ad
4827866f9a
3e697d9ed9
4431a1a484
4d1a9a3f22
eb07a080fb
@@ -55,21 +55,21 @@ export const {serviceName}{Action}Tool: ToolConfig<
},

params: {
// Hidden params (system-injected)
// Hidden params (system-injected, only use hidden for oauth accessToken)
accessToken: {
type: 'string',
required: true,
visibility: 'hidden',
description: 'OAuth access token',
},
// User-only params (credentials, IDs user must provide)
// User-only params (credentials, api key, IDs user must provide)
someId: {
type: 'string',
required: true,
visibility: 'user-only',
description: 'The ID of the resource',
},
// User-or-LLM params (can be provided by user OR computed by LLM)
// User-or-LLM params (everything else, can be provided by user OR computed by LLM)
query: {
type: 'string',
required: false, // Use false for optional
@@ -114,8 +114,8 @@ export const {serviceName}{Action}Tool: ToolConfig<

### Visibility Options
- `'hidden'` - System-injected (OAuth tokens, internal params). User never sees.
- `'user-only'` - User must provide (credentials, account-specific IDs)
- `'user-or-llm'` - User provides OR LLM can compute (search queries, content, filters)
- `'user-only'` - User must provide (credentials, api keys, account-specific IDs)
- `'user-or-llm'` - User provides OR LLM can compute (search queries, content, filters, most fall into this category)

### Parameter Types
- `'string'` - Text values
@@ -35,8 +35,7 @@ const AutoLayoutRequestSchema = z.object({
})
.optional()
.default({}),
// Optional: if provided, use these blocks instead of loading from DB
// This allows using blocks with live measurements from the UI
gridSize: z.number().min(0).max(50).optional(),
blocks: z.record(z.any()).optional(),
edges: z.array(z.any()).optional(),
loops: z.record(z.any()).optional(),
@@ -53,7 +52,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
const { id: workflowId } = await params

try {
// Get the session
const session = await getSession()
if (!session?.user?.id) {
logger.warn(`[${requestId}] Unauthorized autolayout attempt for workflow ${workflowId}`)
@@ -62,7 +60,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{

const userId = session.user.id

// Parse request body
const body = await request.json()
const layoutOptions = AutoLayoutRequestSchema.parse(body)

@@ -70,7 +67,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
userId,
})

// Fetch the workflow to check ownership/access
const accessContext = await getWorkflowAccessContext(workflowId, userId)
const workflowData = accessContext?.workflow

@@ -79,7 +75,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Workflow not found' }, { status: 404 })
}

// Check if user has permission to update this workflow
const canUpdate =
accessContext?.isOwner ||
(workflowData.workspaceId
@@ -94,8 +89,6 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
return NextResponse.json({ error: 'Access denied' }, { status: 403 })
}

// Use provided blocks/edges if available (with live measurements from UI),
// otherwise load from database
let currentWorkflowData: NormalizedWorkflowData | null

if (layoutOptions.blocks && layoutOptions.edges) {
@@ -125,6 +118,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
y: layoutOptions.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
},
alignment: layoutOptions.alignment,
gridSize: layoutOptions.gridSize,
}

const layoutResult = applyAutoLayout(
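A hedged sketch of how a client might call the per-workflow autolayout route changed above. The body fields (alignment, padding, gridSize, blocks, edges) come from AutoLayoutRequestSchema in this hunk; the URL path and the response handling are assumptions, not confirmed by the diff.

```typescript
// Sketch only: endpoint path and response shape are assumed.
async function requestAutoLayout(workflowId: string) {
  const response = await fetch(`/api/workflows/${workflowId}/autolayout`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      alignment: 'center',
      padding: { x: 100, y: 100 },
      gridSize: 20, // optional, constrained to 0-50 by the Zod schema
      // Optionally pass live-measured blocks/edges so the server skips the DB load:
      // blocks: {...}, edges: [...],
    }),
  })
  if (!response.ok) throw new Error(`Auto layout failed: ${response.status}`)
  return response.json()
}
```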
@@ -1,108 +0,0 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { generateRequestId } from '@/lib/core/utils/request'
import { applyAutoLayout } from '@/lib/workflows/autolayout'
import {
DEFAULT_HORIZONTAL_SPACING,
DEFAULT_LAYOUT_PADDING,
DEFAULT_VERTICAL_SPACING,
} from '@/lib/workflows/autolayout/constants'

const logger = createLogger('YamlAutoLayoutAPI')

const AutoLayoutRequestSchema = z.object({
workflowState: z.object({
blocks: z.record(z.any()),
edges: z.array(z.any()),
loops: z.record(z.any()).optional().default({}),
parallels: z.record(z.any()).optional().default({}),
}),
options: z
.object({
spacing: z
.object({
horizontal: z.number().optional(),
vertical: z.number().optional(),
})
.optional(),
alignment: z.enum(['start', 'center', 'end']).optional(),
padding: z
.object({
x: z.number().optional(),
y: z.number().optional(),
})
.optional(),
})
.optional(),
})

export async function POST(request: NextRequest) {
const requestId = generateRequestId()

try {
const body = await request.json()
const { workflowState, options } = AutoLayoutRequestSchema.parse(body)

logger.info(`[${requestId}] Applying auto layout`, {
blockCount: Object.keys(workflowState.blocks).length,
edgeCount: workflowState.edges.length,
})

const autoLayoutOptions = {
horizontalSpacing: options?.spacing?.horizontal ?? DEFAULT_HORIZONTAL_SPACING,
verticalSpacing: options?.spacing?.vertical ?? DEFAULT_VERTICAL_SPACING,
padding: {
x: options?.padding?.x ?? DEFAULT_LAYOUT_PADDING.x,
y: options?.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
},
alignment: options?.alignment ?? 'center',
}

const layoutResult = applyAutoLayout(
workflowState.blocks,
workflowState.edges,
autoLayoutOptions
)

if (!layoutResult.success || !layoutResult.blocks) {
logger.error(`[${requestId}] Auto layout failed:`, {
error: layoutResult.error,
})
return NextResponse.json(
{
success: false,
errors: [layoutResult.error || 'Unknown auto layout error'],
},
{ status: 500 }
)
}

logger.info(`[${requestId}] Auto layout completed successfully:`, {
success: true,
blockCount: Object.keys(layoutResult.blocks).length,
})

const transformedResponse = {
success: true,
workflowState: {
blocks: layoutResult.blocks,
edges: workflowState.edges,
loops: workflowState.loops || {},
parallels: workflowState.parallels || {},
},
}

return NextResponse.json(transformedResponse)
} catch (error) {
logger.error(`[${requestId}] Auto layout failed:`, error)

return NextResponse.json(
{
success: false,
errors: [error instanceof Error ? error.message : 'Unknown auto layout error'],
},
{ status: 500 }
)
}
}
@@ -3,6 +3,7 @@ import { createLogger } from '@sim/logger'
import { useReactFlow } from 'reactflow'
import type { AutoLayoutOptions } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/auto-layout-utils'
import { applyAutoLayoutAndUpdateStore as applyAutoLayoutStandalone } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/auto-layout-utils'
import { useSnapToGridSize } from '@/hooks/queries/general-settings'
import { useCanvasViewport } from '@/hooks/use-canvas-viewport'

export type { AutoLayoutOptions }
@@ -13,21 +14,28 @@ const logger = createLogger('useAutoLayout')
* Hook providing auto-layout functionality for workflows.
* Binds workflowId context and provides memoized callback for React components.
* Includes automatic fitView animation after successful layout.
* Automatically uses the user's snap-to-grid setting for grid-aligned layout.
*
* Note: This hook requires a ReactFlowProvider ancestor.
*/
export function useAutoLayout(workflowId: string | null) {
const reactFlowInstance = useReactFlow()
const { fitViewToBounds } = useCanvasViewport(reactFlowInstance)
const snapToGridSize = useSnapToGridSize()

const applyAutoLayoutAndUpdateStore = useCallback(
async (options: AutoLayoutOptions = {}) => {
if (!workflowId) {
return { success: false, error: 'No workflow ID provided' }
}
return applyAutoLayoutStandalone(workflowId, options)
// Include gridSize from user's snap-to-grid setting
const optionsWithGrid: AutoLayoutOptions = {
...options,
gridSize: options.gridSize ?? (snapToGridSize > 0 ? snapToGridSize : undefined),
}
return applyAutoLayoutStandalone(workflowId, optionsWithGrid)
},
[workflowId]
[workflowId, snapToGridSize]
)

/**
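A small sketch of the gridSize merging introduced above: an explicit option wins, otherwise the user's snap-to-grid setting is applied when it is greater than zero. The standalone helper below just restates that rule for illustration; it is not part of the hook.

```typescript
// Illustration of the merge rule used for optionsWithGrid above.
function mergeGridSize(options: { gridSize?: number }, snapToGridSize: number) {
  return {
    ...options,
    gridSize: options.gridSize ?? (snapToGridSize > 0 ? snapToGridSize : undefined),
  }
}

mergeGridSize({}, 20)               // { gridSize: 20 }        - user setting applied
mergeGridSize({ gridSize: 10 }, 20) // { gridSize: 10 }        - explicit option wins
mergeGridSize({}, 0)                // { gridSize: undefined } - snapping disabled
```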
@@ -21,6 +21,7 @@ export interface AutoLayoutOptions {
x?: number
y?: number
}
gridSize?: number
}

/**
@@ -62,6 +63,7 @@ export async function applyAutoLayoutAndUpdateStore(
x: options.padding?.x ?? DEFAULT_LAYOUT_PADDING.x,
y: options.padding?.y ?? DEFAULT_LAYOUT_PADDING.y,
},
gridSize: options.gridSize,
}

// Call the autolayout API route
@@ -2302,33 +2302,12 @@ const WorkflowContent = React.memo(() => {
window.removeEventListener('remove-from-subflow', handleRemoveFromSubflow as EventListener)
}, [blocks, edgesForDisplay, getNodeAbsolutePosition, collaborativeBatchUpdateParent])

/** Handles node changes - applies changes and resolves parent-child selection conflicts. */
const onNodesChange = useCallback(
(changes: NodeChange[]) => {
selectedIdsRef.current = null
setDisplayNodes((nds) => {
const updated = applyNodeChanges(changes, nds)
const hasSelectionChange = changes.some((c) => c.type === 'select')
if (!hasSelectionChange) return updated
const resolved = resolveParentChildSelectionConflicts(updated, blocks)
selectedIdsRef.current = resolved.filter((node) => node.selected).map((node) => node.id)
return resolved
})
const selectedIds = selectedIdsRef.current as string[] | null
if (selectedIds !== null) {
syncPanelWithSelection(selectedIds)
}
},
[blocks]
)

/**
* Updates container dimensions in displayNodes during drag.
* This allows live resizing of containers as their children are dragged.
* Updates container dimensions in displayNodes during drag or keyboard movement.
*/
const updateContainerDimensionsDuringDrag = useCallback(
(draggedNodeId: string, draggedNodePosition: { x: number; y: number }) => {
const parentId = blocks[draggedNodeId]?.data?.parentId
const updateContainerDimensionsDuringMove = useCallback(
(movedNodeId: string, movedNodePosition: { x: number; y: number }) => {
const parentId = blocks[movedNodeId]?.data?.parentId
if (!parentId) return

setDisplayNodes((currentNodes) => {
@@ -2336,7 +2315,7 @@ const WorkflowContent = React.memo(() => {
if (childNodes.length === 0) return currentNodes

const childPositions = childNodes.map((node) => {
const nodePosition = node.id === draggedNodeId ? draggedNodePosition : node.position
const nodePosition = node.id === movedNodeId ? movedNodePosition : node.position
const { width, height } = getBlockDimensions(node.id)
return { x: nodePosition.x, y: nodePosition.y, width, height }
})
@@ -2367,6 +2346,34 @@ const WorkflowContent = React.memo(() => {
[blocks, getBlockDimensions]
)

/** Handles node changes - applies changes and resolves parent-child selection conflicts. */
const onNodesChange = useCallback(
(changes: NodeChange[]) => {
selectedIdsRef.current = null
setDisplayNodes((nds) => {
const updated = applyNodeChanges(changes, nds)
const hasSelectionChange = changes.some((c) => c.type === 'select')
if (!hasSelectionChange) return updated
const resolved = resolveParentChildSelectionConflicts(updated, blocks)
selectedIdsRef.current = resolved.filter((node) => node.selected).map((node) => node.id)
return resolved
})
const selectedIds = selectedIdsRef.current as string[] | null
if (selectedIds !== null) {
syncPanelWithSelection(selectedIds)
}

// Handle position changes (e.g., from keyboard arrow key movement)
// Update container dimensions when child nodes are moved
for (const change of changes) {
if (change.type === 'position' && 'position' in change && change.position) {
updateContainerDimensionsDuringMove(change.id, change.position)
}
}
},
[blocks, updateContainerDimensionsDuringMove]
)

/**
* Effect to resize loops when nodes change (add/remove/position change).
* Runs on structural changes only - not during drag (position-only changes).
@@ -2611,7 +2618,7 @@ const WorkflowContent = React.memo(() => {

// If the node is inside a container, update container dimensions during drag
if (currentParentId) {
updateContainerDimensionsDuringDrag(node.id, node.position)
updateContainerDimensionsDuringMove(node.id, node.position)
}

// Check if this is a starter block - starter blocks should never be in containers
@@ -2728,7 +2735,7 @@ const WorkflowContent = React.memo(() => {
blocks,
getNodeAbsolutePosition,
getNodeDepth,
updateContainerDimensionsDuringDrag,
updateContainerDimensionsDuringMove,
highlightContainerNode,
]
)
@@ -28,6 +28,7 @@ import type {
} from '@/executor/types'
import { streamingResponseFormatProcessor } from '@/executor/utils'
import { buildBlockExecutionError, normalizeError } from '@/executor/utils/errors'
import { isJSONString } from '@/executor/utils/json'
import { filterOutputForLog } from '@/executor/utils/output-filter'
import { validateBlockType } from '@/executor/utils/permission-check'
import type { VariableResolver } from '@/executor/variables/resolver'
@@ -86,7 +87,7 @@ export class BlockExecutor {
resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)

if (blockLog) {
blockLog.input = resolvedInputs
blockLog.input = this.parseJsonInputs(resolvedInputs)
}
} catch (error) {
cleanupSelfReference?.()
@@ -157,7 +158,14 @@ export class BlockExecutor {
const displayOutput = filterOutputForLog(block.metadata?.id || '', normalizedOutput, {
block,
})
this.callOnBlockComplete(ctx, node, block, resolvedInputs, displayOutput, duration)
this.callOnBlockComplete(
ctx,
node,
block,
this.parseJsonInputs(resolvedInputs),
displayOutput,
duration
)
}

return normalizedOutput
@@ -233,7 +241,7 @@ export class BlockExecutor {
blockLog.durationMs = duration
blockLog.success = false
blockLog.error = errorMessage
blockLog.input = input
blockLog.input = this.parseJsonInputs(input)
blockLog.output = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
}

@@ -248,7 +256,14 @@ export class BlockExecutor {

if (!isSentinel) {
const displayOutput = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
this.callOnBlockComplete(ctx, node, block, input, displayOutput, duration)
this.callOnBlockComplete(
ctx,
node,
block,
this.parseJsonInputs(input),
displayOutput,
duration
)
}

const hasErrorPort = this.hasErrorPortEdge(node)
@@ -336,6 +351,36 @@ export class BlockExecutor {
return { result: output }
}

/**
* Parse JSON string inputs to objects for log display only.
* Attempts to parse any string that looks like JSON.
* Returns a new object - does not mutate the original inputs.
*/
private parseJsonInputs(inputs: Record<string, any>): Record<string, any> {
let result = inputs
let hasChanges = false

for (const [key, value] of Object.entries(inputs)) {
// isJSONString is a quick heuristic (checks for { or [), not a validator.
// Invalid JSON is safely caught below - this just avoids JSON.parse on every string.
if (typeof value !== 'string' || !isJSONString(value)) {
continue
}

try {
if (!hasChanges) {
result = { ...inputs }
hasChanges = true
}
result[key] = JSON.parse(value.trim())
} catch {
// Not valid JSON, keep original string
}
}

return result
}

private callOnBlockStart(ctx: ExecutionContext, node: DAGNode, block: SerializedBlock): void {
const blockId = node.id
const blockName = block.metadata?.name ?? blockId
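To make the effect of parseJsonInputs concrete, here is a hedged, self-contained mimic of the behavior added above (the real method is private to BlockExecutor, and isJSONString is approximated here by a leading "{" or "[" check): JSON-looking strings are parsed for log display, other values pass through, and the source object is not mutated.

```typescript
function parseJsonInputsForLog(inputs: Record<string, unknown>): Record<string, unknown> {
  const result: Record<string, unknown> = { ...inputs }
  for (const [key, value] of Object.entries(inputs)) {
    if (typeof value !== 'string') continue
    const trimmed = value.trim()
    if (!trimmed.startsWith('{') && !trimmed.startsWith('[')) continue
    try {
      result[key] = JSON.parse(trimmed) // parsed copy is for display only
    } catch {
      // not valid JSON, keep the original string
    }
  }
  return result
}

// -> { body: { query: 'hello' }, note: 'plain text', retries: 3 }
console.log(parseJsonInputsForLog({ body: '{"query":"hello"}', note: 'plain text', retries: 3 }))
```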
@@ -936,8 +936,12 @@ export class AgentBlockHandler implements BlockHandler {
systemPrompt: validMessages ? undefined : inputs.systemPrompt,
context: validMessages ? undefined : stringifyJSON(messages),
tools: formattedTools,
temperature: inputs.temperature,
maxTokens: inputs.maxTokens,
temperature:
inputs.temperature != null && inputs.temperature !== ''
? Number(inputs.temperature)
: undefined,
maxTokens:
inputs.maxTokens != null && inputs.maxTokens !== '' ? Number(inputs.maxTokens) : undefined,
apiKey: inputs.apiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
@@ -14,8 +14,8 @@ export interface AgentInputs {
slidingWindowSize?: string // For message-based sliding window
slidingWindowTokens?: string // For token-based sliding window
// LLM parameters
temperature?: number
maxTokens?: number
temperature?: string
maxTokens?: string
apiKey?: string
azureEndpoint?: string
azureApiVersion?: string
@@ -8,7 +8,7 @@ const ivm = require('isolated-vm')
const USER_CODE_START_LINE = 4
const pendingFetches = new Map()
let fetchIdCounter = 0
const FETCH_TIMEOUT_MS = 30000
const FETCH_TIMEOUT_MS = 300000 // 5 minutes

/**
* Extract line and column from error stack or message
@@ -34,6 +34,7 @@ export function layoutContainers(
: DEFAULT_CONTAINER_HORIZONTAL_SPACING,
verticalSpacing: options.verticalSpacing ?? DEFAULT_VERTICAL_SPACING,
padding: { x: CONTAINER_PADDING_X, y: CONTAINER_PADDING_Y },
gridSize: options.gridSize,
}

for (const [parentId, childIds] of children.entries()) {
@@ -56,18 +57,15 @@ export function layoutContainers(
continue
}

// Use the shared core layout function with container options
const { nodes, dimensions } = layoutBlocksCore(childBlocks, childEdges, {
isContainer: true,
layoutOptions: containerOptions,
})

// Apply positions back to blocks
for (const node of nodes.values()) {
blocks[node.id].position = node.position
}

// Update container dimensions
const calculatedWidth = dimensions.width
const calculatedHeight = dimensions.height
@@ -9,6 +9,7 @@ import {
getBlockMetrics,
normalizePositions,
prepareBlockMetrics,
snapNodesToGrid,
} from '@/lib/workflows/autolayout/utils'
import { BLOCK_DIMENSIONS, HANDLE_POSITIONS } from '@/lib/workflows/blocks/block-dimensions'
import { EDGE } from '@/executor/constants'
@@ -84,7 +85,6 @@ export function assignLayers(
): Map<string, GraphNode> {
const nodes = new Map<string, GraphNode>()

// Initialize nodes
for (const [id, block] of Object.entries(blocks)) {
nodes.set(id, {
id,
@@ -97,7 +97,6 @@ export function assignLayers(
})
}

// Build a map of target node -> edges coming into it (to check sourceHandle later)
const incomingEdgesMap = new Map<string, Edge[]>()
for (const edge of edges) {
if (!incomingEdgesMap.has(edge.target)) {
@@ -106,7 +105,6 @@ export function assignLayers(
incomingEdgesMap.get(edge.target)!.push(edge)
}

// Build adjacency from edges
for (const edge of edges) {
const sourceNode = nodes.get(edge.source)
const targetNode = nodes.get(edge.target)
@@ -117,7 +115,6 @@ export function assignLayers(
}
}

// Find starter nodes (no incoming edges)
const starterNodes = Array.from(nodes.values()).filter((node) => node.incoming.size === 0)

if (starterNodes.length === 0 && nodes.size > 0) {
@@ -126,7 +123,6 @@ export function assignLayers(
logger.warn('No starter blocks found, using first block as starter', { blockId: firstNode.id })
}

// Topological sort using Kahn's algorithm
const inDegreeCount = new Map<string, number>()

for (const node of nodes.values()) {
@@ -144,8 +140,6 @@ export function assignLayers(
const node = nodes.get(nodeId)!
processed.add(nodeId)

// Calculate layer based on max incoming layer + 1
// For edges from subflow ends, add the subflow's internal depth (minus 1 to avoid double-counting)
if (node.incoming.size > 0) {
let maxEffectiveLayer = -1
const incomingEdges = incomingEdgesMap.get(nodeId) || []
@@ -153,16 +147,11 @@ export function assignLayers(
for (const incomingId of node.incoming) {
const incomingNode = nodes.get(incomingId)
if (incomingNode) {
// Find edges from this incoming node to check if it's a subflow end edge
const edgesFromSource = incomingEdges.filter((e) => e.source === incomingId)
let additionalDepth = 0

// Check if any edge from this source is a subflow end edge
const hasSubflowEndEdge = edgesFromSource.some(isSubflowEndEdge)
if (hasSubflowEndEdge && subflowDepths) {
// Get the internal depth of the subflow
// Subtract 1 because the +1 at the end of layer calculation already accounts for one layer
// E.g., if subflow has 2 internal layers (depth=2), we add 1 extra so total offset is 2
const depth = subflowDepths.get(incomingId) ?? 1
additionalDepth = Math.max(0, depth - 1)
}
@@ -174,7 +163,6 @@ export function assignLayers(
node.layer = maxEffectiveLayer + 1
}

// Add outgoing nodes when all dependencies processed
for (const targetId of node.outgoing) {
const currentCount = inDegreeCount.get(targetId) || 0
inDegreeCount.set(targetId, currentCount - 1)
@@ -185,7 +173,6 @@ export function assignLayers(
}
}

// Handle isolated nodes
for (const node of nodes.values()) {
if (!processed.has(node.id)) {
logger.debug('Isolated node detected, assigning to layer 0', { blockId: node.id })
@@ -224,7 +211,6 @@ function resolveVerticalOverlaps(nodes: GraphNode[], verticalSpacing: number): v
hasOverlap = false
iteration++

// Group nodes by layer for same-layer overlap resolution
const nodesByLayer = new Map<number, GraphNode[]>()
for (const node of nodes) {
if (!nodesByLayer.has(node.layer)) {
@@ -233,11 +219,9 @@ function resolveVerticalOverlaps(nodes: GraphNode[], verticalSpacing: number): v
nodesByLayer.get(node.layer)!.push(node)
}

// Process each layer independently
for (const [layer, layerNodes] of nodesByLayer) {
if (layerNodes.length < 2) continue

// Sort by Y position for consistent processing
layerNodes.sort((a, b) => a.position.y - b.position.y)

for (let i = 0; i < layerNodes.length - 1; i++) {
@@ -302,7 +286,6 @@ export function calculatePositions(

const layerNumbers = Array.from(layers.keys()).sort((a, b) => a - b)

// Calculate max width for each layer
const layerWidths = new Map<number, number>()
for (const layerNum of layerNumbers) {
const nodesInLayer = layers.get(layerNum)!
@@ -310,7 +293,6 @@ export function calculatePositions(
layerWidths.set(layerNum, maxWidth)
}

// Calculate cumulative X positions for each layer based on actual widths
const layerXPositions = new Map<number, number>()
let cumulativeX = padding.x

@@ -319,7 +301,6 @@ export function calculatePositions(
cumulativeX += layerWidths.get(layerNum)! + horizontalSpacing
}

// Build a flat map of all nodes for quick lookups
const allNodes = new Map<string, GraphNode>()
for (const nodesInLayer of layers.values()) {
for (const node of nodesInLayer) {
@@ -327,7 +308,6 @@ export function calculatePositions(
}
}

// Build incoming edges map for handle lookups
const incomingEdgesMap = new Map<string, Edge[]>()
for (const edge of edges) {
if (!incomingEdgesMap.has(edge.target)) {
@@ -336,20 +316,16 @@ export function calculatePositions(
incomingEdgesMap.get(edge.target)!.push(edge)
}

// Position nodes layer by layer, aligning with connected predecessors
for (const layerNum of layerNumbers) {
const nodesInLayer = layers.get(layerNum)!
const xPosition = layerXPositions.get(layerNum)!

// Separate containers and non-containers
const containersInLayer = nodesInLayer.filter(isContainerBlock)
const nonContainersInLayer = nodesInLayer.filter((n) => !isContainerBlock(n))

// For the first layer (layer 0), position sequentially from padding.y
if (layerNum === 0) {
let yOffset = padding.y

// Sort containers by height for visual balance
containersInLayer.sort((a, b) => b.metrics.height - a.metrics.height)

for (const node of containersInLayer) {
@@ -361,7 +337,6 @@ export function calculatePositions(
yOffset += CONTAINER_VERTICAL_CLEARANCE
}

// Sort non-containers by outgoing connections
nonContainersInLayer.sort((a, b) => b.outgoing.size - a.outgoing.size)

for (const node of nonContainersInLayer) {
@@ -371,9 +346,7 @@ export function calculatePositions(
continue
}

// For subsequent layers, align with connected predecessors (handle-to-handle)
for (const node of [...containersInLayer, ...nonContainersInLayer]) {
// Find the bottommost predecessor handle Y (highest value) and align to it
let bestSourceHandleY = -1
let bestEdge: Edge | null = null
const incomingEdges = incomingEdgesMap.get(node.id) || []
@@ -381,7 +354,6 @@ export function calculatePositions(
for (const edge of incomingEdges) {
const predecessor = allNodes.get(edge.source)
if (predecessor) {
// Calculate actual source handle Y position based on block type and handle
const sourceHandleOffset = getSourceHandleYOffset(predecessor.block, edge.sourceHandle)
const sourceHandleY = predecessor.position.y + sourceHandleOffset

@@ -392,20 +364,16 @@ export function calculatePositions(
}
}

// If no predecessors found (shouldn't happen for layer > 0), use padding
if (bestSourceHandleY < 0) {
bestSourceHandleY = padding.y + HANDLE_POSITIONS.DEFAULT_Y_OFFSET
}

// Calculate the target handle Y offset for this node
const targetHandleOffset = getTargetHandleYOffset(node.block, bestEdge?.targetHandle)

// Position node so its target handle aligns with the source handle Y
node.position = { x: xPosition, y: bestSourceHandleY - targetHandleOffset }
}
}

// Resolve vertical overlaps within layers (X overlaps prevented by cumulative positioning)
resolveVerticalOverlaps(Array.from(layers.values()).flat(), verticalSpacing)
}

@@ -435,7 +403,7 @@ export function layoutBlocksCore(
return { nodes: new Map(), dimensions: { width: 0, height: 0 } }
}

const layoutOptions =
const layoutOptions: LayoutOptions =
options.layoutOptions ??
(options.isContainer ? CONTAINER_LAYOUT_OPTIONS : DEFAULT_LAYOUT_OPTIONS)

@@ -452,7 +420,13 @@ export function layoutBlocksCore(
calculatePositions(layers, edges, layoutOptions)

// 5. Normalize positions
const dimensions = normalizePositions(nodes, { isContainer: options.isContainer })
let dimensions = normalizePositions(nodes, { isContainer: options.isContainer })

// 6. Snap to grid if gridSize is specified (recalculates dimensions)
const snappedDimensions = snapNodesToGrid(nodes, layoutOptions.gridSize)
if (snappedDimensions) {
dimensions = snappedDimensions
}

return { nodes, dimensions }
}
@@ -36,14 +36,13 @@ export function applyAutoLayout(
const horizontalSpacing = options.horizontalSpacing ?? DEFAULT_HORIZONTAL_SPACING
const verticalSpacing = options.verticalSpacing ?? DEFAULT_VERTICAL_SPACING

// Pre-calculate container dimensions by laying out their children (bottom-up)
// This ensures accurate widths/heights before root-level layout
prepareContainerDimensions(
blocksCopy,
edges,
layoutBlocksCore,
horizontalSpacing,
verticalSpacing
verticalSpacing,
options.gridSize
)

const { root: rootBlockIds } = getBlocksByParent(blocksCopy)
@@ -58,8 +57,6 @@ export function applyAutoLayout(
(edge) => layoutRootIds.includes(edge.source) && layoutRootIds.includes(edge.target)
)

// Calculate subflow depths before laying out root blocks
// This ensures blocks connected to subflow ends are positioned correctly
const subflowDepths = calculateSubflowDepths(blocksCopy, edges, assignLayers)

if (Object.keys(rootBlocks).length > 0) {
@@ -95,13 +92,12 @@ export function applyAutoLayout(
}

export type { TargetedLayoutOptions } from '@/lib/workflows/autolayout/targeted'
// Function exports
export { applyTargetedLayout } from '@/lib/workflows/autolayout/targeted'
// Type exports
export type { Edge, LayoutOptions, LayoutResult } from '@/lib/workflows/autolayout/types'
export {
getBlockMetrics,
isContainerType,
shouldSkipAutoLayout,
snapPositionToGrid,
transferBlockHeights,
} from '@/lib/workflows/autolayout/utils'
@@ -1,4 +1,3 @@
import { createLogger } from '@sim/logger'
import {
CONTAINER_PADDING,
DEFAULT_HORIZONTAL_SPACING,
@@ -14,12 +13,11 @@ import {
isContainerType,
prepareContainerDimensions,
shouldSkipAutoLayout,
snapPositionToGrid,
} from '@/lib/workflows/autolayout/utils'
import { CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
import type { BlockState } from '@/stores/workflows/workflow/types'

const logger = createLogger('AutoLayout:Targeted')

export interface TargetedLayoutOptions extends LayoutOptions {
changedBlockIds: string[]
verticalSpacing?: number
@@ -39,6 +37,7 @@ export function applyTargetedLayout(
changedBlockIds,
verticalSpacing = DEFAULT_VERTICAL_SPACING,
horizontalSpacing = DEFAULT_HORIZONTAL_SPACING,
gridSize,
} = options

if (!changedBlockIds || changedBlockIds.length === 0) {
@@ -48,19 +47,17 @@ export function applyTargetedLayout(
const changedSet = new Set(changedBlockIds)
const blocksCopy: Record<string, BlockState> = JSON.parse(JSON.stringify(blocks))

// Pre-calculate container dimensions by laying out their children (bottom-up)
// This ensures accurate widths/heights before root-level layout
prepareContainerDimensions(
blocksCopy,
edges,
layoutBlocksCore,
horizontalSpacing,
verticalSpacing
verticalSpacing,
gridSize
)

const groups = getBlocksByParent(blocksCopy)

// Calculate subflow depths before layout to properly position blocks after subflow ends
const subflowDepths = calculateSubflowDepths(blocksCopy, edges, assignLayers)

layoutGroup(
@@ -71,7 +68,8 @@ export function applyTargetedLayout(
changedSet,
verticalSpacing,
horizontalSpacing,
subflowDepths
subflowDepths,
gridSize
)

for (const [parentId, childIds] of groups.children.entries()) {
@@ -83,7 +81,8 @@ export function applyTargetedLayout(
changedSet,
verticalSpacing,
horizontalSpacing,
subflowDepths
subflowDepths,
gridSize
)
}

@@ -101,7 +100,8 @@ function layoutGroup(
changedSet: Set<string>,
verticalSpacing: number,
horizontalSpacing: number,
subflowDepths: Map<string, number>
subflowDepths: Map<string, number>,
gridSize?: number
): void {
if (childIds.length === 0) return

@@ -116,7 +116,6 @@ function layoutGroup(
return
}

// Determine which blocks need repositioning
const requestedLayout = layoutEligibleChildIds.filter((id) => {
const block = blocks[id]
if (!block) return false
@@ -141,7 +140,6 @@ function layoutGroup(
return
}

// Store old positions for anchor calculation
const oldPositions = new Map<string, { x: number; y: number }>()
for (const id of layoutEligibleChildIds) {
const block = blocks[id]
@@ -149,8 +147,6 @@ function layoutGroup(
oldPositions.set(id, { ...block.position })
}

// Compute layout positions using core function
// Only pass subflowDepths for root-level layout (not inside containers)
const layoutPositions = computeLayoutPositions(
layoutEligibleChildIds,
blocks,
@@ -158,7 +154,8 @@ function layoutGroup(
parentBlock,
horizontalSpacing,
verticalSpacing,
parentId === null ? subflowDepths : undefined
parentId === null ? subflowDepths : undefined,
gridSize
)

if (layoutPositions.size === 0) {
@@ -168,7 +165,6 @@ function layoutGroup(
return
}

// Find anchor block (unchanged block with a layout position)
let offsetX = 0
let offsetY = 0

@@ -185,20 +181,16 @@ function layoutGroup(
}
}

// Apply new positions only to blocks that need layout
for (const id of needsLayout) {
const block = blocks[id]
const newPos = layoutPositions.get(id)
if (!block || !newPos) continue
block.position = {
x: newPos.x + offsetX,
y: newPos.y + offsetY,
}
block.position = snapPositionToGrid({ x: newPos.x + offsetX, y: newPos.y + offsetY }, gridSize)
}
}

/**
* Computes layout positions for a subset of blocks using the core layout
* Computes layout positions for a subset of blocks using the core layout function
*/
function computeLayoutPositions(
childIds: string[],
@@ -207,7 +199,8 @@ function computeLayoutPositions(
parentBlock: BlockState | undefined,
horizontalSpacing: number,
verticalSpacing: number,
subflowDepths?: Map<string, number>
subflowDepths?: Map<string, number>,
gridSize?: number
): Map<string, { x: number; y: number }> {
const subsetBlocks: Record<string, BlockState> = {}
for (const id of childIds) {
@@ -228,11 +221,11 @@ function computeLayoutPositions(
layoutOptions: {
horizontalSpacing: isContainer ? horizontalSpacing * 0.85 : horizontalSpacing,
verticalSpacing,
gridSize,
},
subflowDepths,
})

// Update parent container dimensions if applicable
if (parentBlock) {
parentBlock.data = {
...parentBlock.data,
@@ -241,7 +234,6 @@ function computeLayoutPositions(
}
}

// Convert nodes to position map
const positions = new Map<string, { x: number; y: number }>()
for (const node of nodes.values()) {
positions.set(node.id, { x: node.position.x, y: node.position.y })
@@ -7,6 +7,7 @@ export interface LayoutOptions {
horizontalSpacing?: number
verticalSpacing?: number
padding?: { x: number; y: number }
gridSize?: number
}

export interface LayoutResult {
@@ -18,6 +18,61 @@ function resolveNumeric(value: number | undefined, fallback: number): number {
return typeof value === 'number' && Number.isFinite(value) ? value : fallback
}

/**
* Snaps a single coordinate value to the nearest grid position
*/
function snapToGrid(value: number, gridSize: number): number {
return Math.round(value / gridSize) * gridSize
}

/**
* Snaps a position to the nearest grid point.
* Returns the original position if gridSize is 0 or not provided.
*/
export function snapPositionToGrid(
position: { x: number; y: number },
gridSize: number | undefined
): { x: number; y: number } {
if (!gridSize || gridSize <= 0) {
return position
}
return {
x: snapToGrid(position.x, gridSize),
y: snapToGrid(position.y, gridSize),
}
}

/**
* Snaps all node positions in a graph to grid positions and returns updated dimensions.
* Returns null if gridSize is not set or no snapping was needed.
*/
export function snapNodesToGrid(
nodes: Map<string, GraphNode>,
gridSize: number | undefined
): { width: number; height: number } | null {
if (!gridSize || gridSize <= 0 || nodes.size === 0) {
return null
}

let minX = Number.POSITIVE_INFINITY
let minY = Number.POSITIVE_INFINITY
let maxX = Number.NEGATIVE_INFINITY
let maxY = Number.NEGATIVE_INFINITY

for (const node of nodes.values()) {
node.position = snapPositionToGrid(node.position, gridSize)
minX = Math.min(minX, node.position.x)
minY = Math.min(minY, node.position.y)
maxX = Math.max(maxX, node.position.x + node.metrics.width)
maxY = Math.max(maxY, node.position.y + node.metrics.height)
}

return {
width: maxX - minX + CONTAINER_PADDING * 2,
height: maxY - minY + CONTAINER_PADDING * 2,
}
}

/**
* Checks if a block type is a container (loop or parallel)
*/
@@ -314,6 +369,7 @@ export type LayoutFunction = (
horizontalSpacing?: number
verticalSpacing?: number
padding?: { x: number; y: number }
gridSize?: number
}
subflowDepths?: Map<string, number>
}
@@ -329,13 +385,15 @@ export type LayoutFunction = (
* @param layoutFn - The layout function to use for calculating dimensions
* @param horizontalSpacing - Horizontal spacing between blocks
* @param verticalSpacing - Vertical spacing between blocks
* @param gridSize - Optional grid size for snap-to-grid
*/
export function prepareContainerDimensions(
blocks: Record<string, BlockState>,
edges: Edge[],
layoutFn: LayoutFunction,
horizontalSpacing: number,
verticalSpacing: number
verticalSpacing: number,
gridSize?: number
): void {
const { children } = getBlocksByParent(blocks)

@@ -402,6 +460,7 @@ export function prepareContainerDimensions(
layoutOptions: {
horizontalSpacing: horizontalSpacing * 0.85,
verticalSpacing,
gridSize,
},
})
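A small usage sketch of the snapping helpers added above, assuming a 20px grid; the import path matches the utils module touched in this diff.

```typescript
import { snapPositionToGrid } from '@/lib/workflows/autolayout/utils'

snapPositionToGrid({ x: 103, y: 47 }, 20)        // -> { x: 100, y: 40 }
snapPositionToGrid({ x: 110, y: 50 }, 20)        // -> { x: 120, y: 60 } (rounds to nearest)
snapPositionToGrid({ x: 103, y: 47 }, undefined) // -> { x: 103, y: 47 } (no grid, unchanged)
```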
@@ -102,7 +102,7 @@ export const azureOpenAIProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity
@@ -77,7 +77,7 @@ export const cerebrasProvider: ProviderConfig = {
messages: allMessages,
}
if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens
if (request.responseFormat) {
payload.response_format = {
type: 'json_schema',
@@ -81,7 +81,7 @@ export const deepseekProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens

let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
@@ -349,7 +349,7 @@ export async function executeGeminiRequest(
if (request.temperature !== undefined) {
geminiConfig.temperature = request.temperature
}
if (request.maxTokens !== undefined) {
if (request.maxTokens != null) {
geminiConfig.maxOutputTokens = request.maxTokens
}
if (systemInstruction) {
@@ -123,17 +123,21 @@ export function extractFunctionCallPart(candidate: Candidate | undefined): Part
}

/**
* Converts usage metadata from SDK response to our format
* Converts usage metadata from SDK response to our format.
* Per Gemini docs, total = promptTokenCount + candidatesTokenCount + toolUsePromptTokenCount + thoughtsTokenCount
* We include toolUsePromptTokenCount in input and thoughtsTokenCount in output for correct billing.
*/
export function convertUsageMetadata(
usageMetadata: GenerateContentResponseUsageMetadata | undefined
): GeminiUsage {
const promptTokenCount = usageMetadata?.promptTokenCount ?? 0
const candidatesTokenCount = usageMetadata?.candidatesTokenCount ?? 0
const thoughtsTokenCount = usageMetadata?.thoughtsTokenCount ?? 0
const toolUsePromptTokenCount = usageMetadata?.toolUsePromptTokenCount ?? 0
const promptTokenCount = (usageMetadata?.promptTokenCount ?? 0) + toolUsePromptTokenCount
const candidatesTokenCount = (usageMetadata?.candidatesTokenCount ?? 0) + thoughtsTokenCount
return {
promptTokenCount,
candidatesTokenCount,
totalTokenCount: usageMetadata?.totalTokenCount ?? promptTokenCount + candidatesTokenCount,
totalTokenCount: usageMetadata?.totalTokenCount ?? 0,
}
}
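A worked example of the accounting change above, using assumed sample counts. Per the comment in the diff, Gemini reports total = prompt + candidates + toolUsePrompt + thoughts, so folding the tool-use tokens into input and the thinking tokens into output keeps input plus output equal to the reported total.

```typescript
// Assumed sample usage metadata, not taken from a real response.
const usage = {
  promptTokenCount: 1000,
  candidatesTokenCount: 200,
  toolUsePromptTokenCount: 50,
  thoughtsTokenCount: 120,
  totalTokenCount: 1370, // 1000 + 200 + 50 + 120
}

const inputTokens = usage.promptTokenCount + usage.toolUsePromptTokenCount // 1050
const outputTokens = usage.candidatesTokenCount + usage.thoughtsTokenCount // 320
const totalTokens = usage.totalTokenCount ?? 0                             // 1370 = 1050 + 320
```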
@@ -74,7 +74,7 @@ export const groqProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

if (request.responseFormat) {
payload.response_format = {
@@ -91,7 +91,7 @@ export const mistralProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens

if (request.responseFormat) {
payload.response_format = {
@@ -1130,7 +1130,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'cerebras',
name: 'Cerebras',
description: 'Cerebras Cloud LLMs',
defaultModel: 'cerebras/llama-3.3-70b',
defaultModel: 'cerebras/gpt-oss-120b',
modelPatterns: [/^cerebras/],
icon: CerebrasIcon,
capabilities: {
@@ -1138,44 +1138,64 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
},
models: [
{
id: 'cerebras/llama-3.1-8b',
id: 'cerebras/gpt-oss-120b',
pricing: {
input: 0.35,
output: 0.75,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
{
id: 'cerebras/llama3.1-8b',
pricing: {
input: 0.1,
output: 0.1,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 32000,
},
{
id: 'cerebras/llama-3.1-70b',
pricing: {
input: 0.6,
output: 0.6,
updatedAt: '2025-10-11',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'cerebras/llama-3.3-70b',
pricing: {
input: 0.6,
output: 0.6,
updatedAt: '2025-10-11',
input: 0.85,
output: 1.2,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'cerebras/llama-4-scout-17b-16e-instruct',
id: 'cerebras/qwen-3-32b',
pricing: {
input: 0.11,
output: 0.34,
updatedAt: '2025-10-11',
input: 0.4,
output: 0.8,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 10000000,
contextWindow: 131000,
},
{
id: 'cerebras/qwen-3-235b-a22b-instruct-2507',
pricing: {
input: 0.6,
output: 1.2,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
{
id: 'cerebras/zai-glm-4.7',
pricing: {
input: 2.25,
output: 2.75,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131000,
},
],
},
@@ -1194,8 +1214,8 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
id: 'groq/openai/gpt-oss-120b',
pricing: {
input: 0.15,
output: 0.75,
updatedAt: '2025-10-11',
output: 0.6,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1203,9 +1223,29 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
{
id: 'groq/openai/gpt-oss-20b',
pricing: {
input: 0.01,
output: 0.25,
updatedAt: '2025-10-11',
input: 0.075,
output: 0.3,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/openai/gpt-oss-safeguard-20b',
pricing: {
input: 0.075,
output: 0.3,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/qwen/qwen3-32b',
pricing: {
input: 0.29,
output: 0.59,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1215,7 +1255,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.05,
output: 0.08,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1225,27 +1265,17 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.59,
output: 0.79,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/llama-4-scout-17b-instruct',
id: 'groq/meta-llama/llama-4-scout-17b-16e-instruct',
pricing: {
input: 0.11,
output: 0.34,
updatedAt: '2025-10-11',
},
capabilities: {},
contextWindow: 131072,
},
{
id: 'groq/llama-4-maverick-17b-instruct',
pricing: {
input: 0.5,
output: 0.77,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1253,9 +1283,9 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
{
id: 'groq/meta-llama/llama-4-maverick-17b-128e-instruct',
pricing: {
input: 0.5,
output: 0.77,
updatedAt: '2025-10-11',
input: 0.2,
output: 0.6,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -1265,7 +1295,7 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.04,
output: 0.04,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 8192,
@@ -1275,27 +1305,37 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
pricing: {
input: 0.59,
output: 0.79,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'groq/moonshotai/kimi-k2-instruct',
id: 'groq/deepseek-r1-distill-qwen-32b',
pricing: {
input: 0.69,
output: 0.69,
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 128000,
},
{
id: 'groq/moonshotai/kimi-k2-instruct-0905',
pricing: {
input: 1.0,
output: 3.0,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
contextWindow: 262144,
},
{
id: 'groq/meta-llama/llama-guard-4-12b',
pricing: {
input: 0.2,
output: 0.2,
updatedAt: '2025-10-11',
updatedAt: '2026-01-27',
},
capabilities: {},
contextWindow: 131072,
@@ -105,7 +105,7 @@ export const ollamaProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens

if (request.responseFormat) {
payload.response_format = {
@@ -81,7 +81,7 @@ export const openaiProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

if (request.reasoningEffort !== undefined) payload.reasoning_effort = request.reasoningEffort
if (request.verbosity !== undefined) payload.verbosity = request.verbosity
@@ -121,7 +121,7 @@ export const openRouterProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_tokens = request.maxTokens

let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null
let hasActiveTools = false
@@ -516,7 +516,7 @@ export const openRouterProvider: ProviderConfig = {
return streamingResult as StreamingExecution
}

if (request.responseFormat && hasActiveTools && toolCalls.length > 0) {
if (request.responseFormat && hasActiveTools) {
const finalPayload: any = {
model: payload.model,
messages: [...currentMessages],
@@ -135,7 +135,7 @@ export const vllmProvider: ProviderConfig = {
}

if (request.temperature !== undefined) payload.temperature = request.temperature
if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens
if (request.maxTokens != null) payload.max_completion_tokens = request.maxTokens

if (request.responseFormat) {
payload.response_format = {
@@ -92,7 +92,7 @@ export const xAIProvider: ProviderConfig = {
}

if (request.temperature !== undefined) basePayload.temperature = request.temperature
if (request.maxTokens !== undefined) basePayload.max_tokens = request.maxTokens
if (request.maxTokens != null) basePayload.max_completion_tokens = request.maxTokens
let preparedTools: ReturnType<typeof prepareToolsWithUsageControl> | null = null

if (tools?.length) {
@@ -253,23 +253,6 @@ describe('executeTool Function', () => {
vi.restoreAllMocks()
})

it('should handle errors from tools', async () => {
setupFetchMock({ status: 400, ok: false, json: { error: 'Bad request' } })

const result = await executeTool(
'http_request',
{
url: 'https://api.example.com/data',
method: 'GET',
},
true
)

expect(result.success).toBe(false)
expect(result.error).toBeDefined()
expect(result.timing).toBeDefined()
})

it('should add timing information to results', async () => {
const result = await executeTool(
'http_request',
84	helm/sim/templates/cert-manager-issuers.yaml	Normal file
@@ -0,0 +1,84 @@
{{- if .Values.certManager.enabled }}
{{- /*
cert-manager Issuer Bootstrap Pattern

PREREQUISITE: cert-manager must be installed in your cluster before enabling this.
The root CA Certificate is created in the namespace specified by certManager.rootCA.namespace
(defaults to "cert-manager"). Ensure this namespace exists and cert-manager is running there.

Install cert-manager: https://cert-manager.io/docs/installation/

This implements the recommended pattern from cert-manager documentation:
1. A self-signed ClusterIssuer (for bootstrapping the root CA only)
2. A root CA Certificate (self-signed, used to sign other certificates)
3. A CA ClusterIssuer (uses the root CA to sign certificates)

Reference: https://cert-manager.io/docs/configuration/selfsigned/
*/ -}}

---
# 1. Self-Signed ClusterIssuer (Bootstrap Only)
# This issuer is used ONLY to create the root CA certificate.
# It should NOT be used directly for application certificates.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ .Values.certManager.selfSignedIssuer.name }}
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: cert-manager
spec:
  selfSigned: {}

---
# 2. Root CA Certificate
# This certificate is signed by the self-signed issuer and becomes the root of trust.
# The secret created here will be used by the CA issuer to sign certificates.
# NOTE: This must be created in the cert-manager namespace (or the namespace specified
# in certManager.rootCA.namespace). Ensure cert-manager is installed there first.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: {{ .Values.certManager.rootCA.certificateName }}
  namespace: {{ .Values.certManager.rootCA.namespace | default "cert-manager" }} # Must match cert-manager's cluster-resource-namespace
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: cert-manager
spec:
  isCA: true
  commonName: {{ .Values.certManager.rootCA.commonName }}
  secretName: {{ .Values.certManager.rootCA.secretName }}
  duration: {{ .Values.certManager.rootCA.duration | default "87600h" }}
  renewBefore: {{ .Values.certManager.rootCA.renewBefore | default "2160h" }}
  privateKey:
    algorithm: {{ .Values.certManager.rootCA.privateKey.algorithm | default "RSA" }}
    size: {{ .Values.certManager.rootCA.privateKey.size | default 4096 }}
  subject:
    organizations:
      {{- if .Values.certManager.rootCA.subject.organizations }}
      {{- toYaml .Values.certManager.rootCA.subject.organizations | nindent 6 }}
      {{- else }}
      - {{ .Release.Name }}
      {{- end }}
  issuerRef:
    name: {{ .Values.certManager.selfSignedIssuer.name }}
    kind: ClusterIssuer
    group: cert-manager.io

---
# 3. CA ClusterIssuer
# This is the issuer that should be used by applications to obtain certificates.
# It signs certificates using the root CA created above.
# NOTE: This issuer may briefly show "not ready" on first install while cert-manager
# processes the Certificate above and creates the secret. It will auto-reconcile.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: {{ .Values.certManager.caIssuer.name }}
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: cert-manager
spec:
  ca:
    secretName: {{ .Values.certManager.rootCA.secretName }}
{{- end }}
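For reference, an application Certificate that consumes the CA ClusterIssuer above could look roughly like the sketch below. The resource name, namespace, secret name, and DNS name are illustrative placeholders and are not created by the chart; only the issuerRef matches the chart's defaults.

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: sim-app-tls                # illustrative name
  namespace: default               # illustrative namespace
spec:
  secretName: sim-app-tls-secret   # where cert-manager stores the signed key pair
  dnsNames:
    - sim.example.com              # illustrative host
  issuerRef:
    name: sim-ca-issuer            # default certManager.caIssuer.name
    kind: ClusterIssuer
    group: cert-manager.io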
@@ -1,6 +1,36 @@
{{- if and .Values.ollama.enabled .Values.ollama.gpu.enabled }}
---
# NVIDIA Device Plugin DaemonSet for GPU support
# 1. ConfigMap for NVIDIA Device Plugin Configuration
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "sim.fullname" . }}-nvidia-device-plugin-config
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "sim.labels" . | nindent 4 }}
    app.kubernetes.io/component: nvidia-device-plugin
data:
  config.yaml: |
    version: v1
    flags:
      {{- if eq .Values.ollama.gpu.strategy "mig" }}
      migStrategy: "single"
      {{- else }}
      migStrategy: "none"
      {{- end }}
      failOnInitError: false
      plugin:
        passDeviceSpecs: true
        deviceListStrategy: envvar
    {{- if eq .Values.ollama.gpu.strategy "time-slicing" }}
    sharing:
      timeSlicing:
        resources:
          - name: nvidia.com/gpu
            replicas: {{ .Values.ollama.gpu.timeSlicingReplicas | default 5 }}
    {{- end }}
---
# 2. NVIDIA Device Plugin DaemonSet for GPU support
apiVersion: apps/v1
kind: DaemonSet
metadata:
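For orientation, rendering the ConfigMap above with the default strategy "time-slicing" and timeSlicingReplicas: 5 would be expected to produce a config.yaml roughly like the following (an illustrative rendering, not captured chart output):

version: v1
flags:
  migStrategy: "none"
  failOnInitError: false
  plugin:
    passDeviceSpecs: true
    deviceListStrategy: envvar
sharing:
  timeSlicing:
    resources:
      - name: nvidia.com/gpu
        replicas: 5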
@@ -35,9 +65,6 @@ spec:
        # Only schedule on nodes with NVIDIA GPUs
        accelerator: nvidia
      priorityClassName: system-node-critical
      runtimeClassName: nvidia
      hostNetwork: true
      hostPID: true
      volumes:
        - name: device-plugin
          hostPath:

@@ -48,22 +75,21 @@
        - name: sys
          hostPath:
            path: /sys
        - name: proc-driver-nvidia
          hostPath:
            path: /proc/driver/nvidia
        # Volume to mount the ConfigMap
        - name: nvidia-device-plugin-config
          configMap:
            name: {{ include "sim.fullname" . }}-nvidia-device-plugin-config
      containers:
        - name: nvidia-device-plugin
          image: nvcr.io/nvidia/k8s-device-plugin:v0.14.5
          image: nvcr.io/nvidia/k8s-device-plugin:v0.18.2
          imagePullPolicy: Always
          args:
            - --mig-strategy=single
            - --pass-device-specs=true
            - --fail-on-init-error=false
            - --device-list-strategy=envvar
            - --nvidia-driver-root=/host-sys/fs/cgroup
            - "--config-file=/etc/device-plugin/config.yaml"
          {{- if eq .Values.ollama.gpu.strategy "mig" }}
          env:
            - name: NVIDIA_MIG_MONITOR_DEVICES
              value: all
          {{- end }}
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:

@@ -74,29 +100,16 @@
            - name: dev
              mountPath: /dev
            - name: sys
              mountPath: /host-sys
              mountPath: /sys
              readOnly: true
            - name: proc-driver-nvidia
              mountPath: /proc/driver/nvidia
            - name: nvidia-device-plugin-config
              mountPath: /etc/device-plugin/
              readOnly: true
          resources:
            requests:
              cpu: 50m
              memory: 10Mi
              memory: 20Mi
            limits:
              cpu: 50m
              memory: 20Mi
      {{- if .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml .Values.nodeSelector | nindent 8 }}
      {{- end }}
---
# RuntimeClass for NVIDIA Container Runtime
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: {{ include "sim.fullname" . }}-nvidia
  labels:
    {{- include "sim.labels" . | nindent 4 }}
handler: nvidia
{{- end }}
              memory: 50Mi
{{- end }}
@@ -400,8 +400,10 @@ postgresql:
      algorithm: RSA # RSA or ECDSA
      size: 4096 # Key size in bits
    # Issuer reference (REQUIRED if tls.enabled is true)
    # By default, references the CA issuer created by certManager.caIssuer
    # Make sure certManager.enabled is true, or provide your own issuer
    issuerRef:
      name: selfsigned-cluster-issuer # Name of your cert-manager Issuer/ClusterIssuer
      name: sim-ca-issuer # Name of your cert-manager Issuer/ClusterIssuer
      kind: ClusterIssuer # ClusterIssuer or Issuer
      group: "" # Optional: cert-manager.io (leave empty for default)
    # Additional DNS names (optional)
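If a cluster already runs its own issuer, the same block can point at it instead of the chart-managed CA issuer; a hypothetical sketch (the issuer name below is illustrative and not created by this chart):

    issuerRef:
      name: my-existing-ca-issuer # hypothetical pre-existing ClusterIssuer
      kind: ClusterIssuer
      group: cert-manager.io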
@@ -463,20 +465,26 @@ externalDatabase:
ollama:
  # Enable/disable Ollama deployment
  enabled: false

  # Image configuration
  image:
    repository: ollama/ollama
    tag: latest
    pullPolicy: Always

  # Number of replicas
  replicaCount: 1

  # GPU configuration
  gpu:
    enabled: false
    count: 1
    # GPU sharing strategy: "mig" (Multi-Instance GPU) or "time-slicing"
    # - mig: Hardware-level GPU partitioning (requires supported GPUs like A100)
    # - time-slicing: Software-level GPU sharing (works with most NVIDIA GPUs)
    strategy: "time-slicing"
    # Number of time-slicing replicas (only used when strategy is "time-slicing")
    timeSlicingReplicas: 5

  # Node selector for GPU workloads (adjust labels based on your cluster configuration)
  nodeSelector:
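A minimal values override that turns GPU sharing on, using only the keys shown above; switching strategy to "mig" is only appropriate on MIG-capable hardware such as the A100:

ollama:
  enabled: true
  gpu:
    enabled: true
    strategy: "time-slicing"
    timeSlicingReplicas: 5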
@@ -1185,4 +1193,53 @@ externalSecrets:
    # External database password (when using managed database services)
    externalDatabase:
      # Path to external database password in external store
      password: ""
      password: ""

# cert-manager configuration
# Prerequisites: Install cert-manager in your cluster first
# See: https://cert-manager.io/docs/installation/
#
# This implements the recommended CA bootstrap pattern from cert-manager:
# 1. Self-signed ClusterIssuer (bootstrap only - creates root CA)
# 2. Root CA Certificate (self-signed, becomes the trust anchor)
# 3. CA ClusterIssuer (signs application certificates using root CA)
#
# Reference: https://cert-manager.io/docs/configuration/selfsigned/
certManager:
  # Enable/disable cert-manager issuer resources
  enabled: false

  # Self-signed ClusterIssuer (used ONLY to bootstrap the root CA)
  # Do not reference this issuer directly for application certificates
  selfSignedIssuer:
    name: "sim-selfsigned-bootstrap-issuer"

  # Root CA Certificate configuration
  # This certificate is signed by the self-signed issuer and used as the trust anchor
  rootCA:
    # Name of the Certificate resource
    certificateName: "sim-root-ca"
    # Namespace where the root CA certificate and secret will be created
    # Must match cert-manager's cluster-resource-namespace (default: cert-manager)
    namespace: "cert-manager"
    # Common name for the root CA certificate
    commonName: "sim-root-ca"
    # Secret name where the root CA certificate and key will be stored
    secretName: "sim-root-ca-secret"
    # Certificate validity duration (default: 10 years)
    duration: "87600h"
    # Renew before expiry (default: 90 days)
    renewBefore: "2160h"
    # Private key configuration
    privateKey:
      algorithm: RSA
      size: 4096
    # Subject configuration
    subject:
      organizations: []
      # If empty, defaults to the release name

  # CA ClusterIssuer configuration
  # This is the issuer that applications should reference for obtaining certificates
  caIssuer:
    name: "sim-ca-issuer"