Compare commits

...

9 Commits

Author SHA1 Message Date
waleed
d222e924b1 fix(azure): conditionally added responses api 2025-12-16 21:18:40 -08:00
Waleed
dcbeca1abe fix(subflow): fix json stringification in subflow collections (#2419)
* fix(subflow): fix json stringification in subflow collections

* cleanup
2025-12-16 20:47:58 -08:00
Waleed
27ea333974 fix(chat): fix stale closure in workflow runner for chat (#2418) 2025-12-16 19:59:02 -08:00
Waleed
9861d3a0ac improvement(helm): added more to helm charts, remove instance selector for various cloud providers (#2412)
* improvement(helm): added more to helm charts, remove instance selector for various cloud providers

* ack PR comment
2025-12-16 18:24:00 -08:00
Waleed
fdbf8be79b fix(logs-search): restored support for log search queries (#2417) 2025-12-16 18:18:46 -08:00
Adam Gough
6f4f4e22f0 fix(loop): increased max loop iterations to 1000 (#2413) 2025-12-16 16:08:56 -08:00
Vikhyath Mondreti
f7d2c9667f fix(serializer): condition check should check if any conditions are met (#2410)
* fix(serializer): condition check should check if any conditions are met

* remove comments

* remove more comments
2025-12-16 14:36:40 -08:00
Waleed
29befbc5f6 feat(schedule): add input form to schedule (#2405)
* feat(schedule): add input form to schedule

* change placeholder
2025-12-16 11:23:57 -08:00
Vikhyath Mondreti
9cf8aaee1b fix(sockets): add zod as direct sockets server dep (#2397)
* fix(sockets): add zod as direct sockets server dep

* fix bun lock
2025-12-15 20:25:40 -08:00
25 changed files with 943 additions and 592 deletions

View File

@@ -6,7 +6,22 @@ import {
workflowDeploymentVersion,
workflowExecutionLogs,
} from '@sim/db/schema'
import { and, desc, eq, gte, inArray, isNotNull, isNull, lte, or, type SQL, sql } from 'drizzle-orm'
import {
and,
desc,
eq,
gt,
gte,
inArray,
isNotNull,
isNull,
lt,
lte,
ne,
or,
type SQL,
sql,
} from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
@@ -22,14 +37,19 @@ const QueryParamsSchema = z.object({
limit: z.coerce.number().optional().default(100),
offset: z.coerce.number().optional().default(0),
level: z.string().optional(),
workflowIds: z.string().optional(), // Comma-separated list of workflow IDs
folderIds: z.string().optional(), // Comma-separated list of folder IDs
triggers: z.string().optional(), // Comma-separated list of trigger types
workflowIds: z.string().optional(),
folderIds: z.string().optional(),
triggers: z.string().optional(),
startDate: z.string().optional(),
endDate: z.string().optional(),
search: z.string().optional(),
workflowName: z.string().optional(),
folderName: z.string().optional(),
executionId: z.string().optional(),
costOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
costValue: z.coerce.number().optional(),
durationOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
durationValue: z.coerce.number().optional(),
workspaceId: z.string(),
})
@@ -49,7 +69,6 @@ export async function GET(request: NextRequest) {
const { searchParams } = new URL(request.url)
const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries()))
// Conditionally select columns based on detail level to optimize performance
const selectColumns =
params.details === 'full'
? {
@@ -63,9 +82,9 @@ export async function GET(request: NextRequest) {
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
executionData: workflowExecutionLogs.executionData, // Large field - only in full mode
executionData: workflowExecutionLogs.executionData,
cost: workflowExecutionLogs.cost,
files: workflowExecutionLogs.files, // Large field - only in full mode
files: workflowExecutionLogs.files,
createdAt: workflowExecutionLogs.createdAt,
workflowName: workflow.name,
workflowDescription: workflow.description,
@@ -82,7 +101,6 @@ export async function GET(request: NextRequest) {
deploymentVersionName: workflowDeploymentVersion.name,
}
: {
// Basic mode - exclude large fields for better performance
id: workflowExecutionLogs.id,
workflowId: workflowExecutionLogs.workflowId,
executionId: workflowExecutionLogs.executionId,
@@ -93,9 +111,9 @@ export async function GET(request: NextRequest) {
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
executionData: sql<null>`NULL`, // Exclude large execution data in basic mode
executionData: sql<null>`NULL`,
cost: workflowExecutionLogs.cost,
files: sql<null>`NULL`, // Exclude files in basic mode
files: sql<null>`NULL`,
createdAt: workflowExecutionLogs.createdAt,
workflowName: workflow.name,
workflowDescription: workflow.description,
@@ -109,7 +127,7 @@ export async function GET(request: NextRequest) {
pausedTotalPauseCount: pausedExecutions.totalPauseCount,
pausedResumedCount: pausedExecutions.resumedCount,
deploymentVersion: workflowDeploymentVersion.version,
deploymentVersionName: sql<null>`NULL`, // Only needed in full mode for details panel
deploymentVersionName: sql<null>`NULL`,
}
const baseQuery = db
@@ -139,34 +157,28 @@ export async function GET(request: NextRequest) {
)
)
// Build additional conditions for the query
let conditions: SQL | undefined
// Filter by level with support for derived statuses (running, pending)
if (params.level && params.level !== 'all') {
const levels = params.level.split(',').filter(Boolean)
const levelConditions: SQL[] = []
for (const level of levels) {
if (level === 'error') {
// Direct database field
levelConditions.push(eq(workflowExecutionLogs.level, 'error'))
} else if (level === 'info') {
// Completed info logs only (not running, not pending)
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
isNotNull(workflowExecutionLogs.endedAt)
)
if (condition) levelConditions.push(condition)
} else if (level === 'running') {
// Running logs: info level with no endedAt
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
isNull(workflowExecutionLogs.endedAt)
)
if (condition) levelConditions.push(condition)
} else if (level === 'pending') {
// Pending logs: info level with pause status indicators
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
or(
@@ -189,7 +201,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by specific workflow IDs
if (params.workflowIds) {
const workflowIds = params.workflowIds.split(',').filter(Boolean)
if (workflowIds.length > 0) {
@@ -197,7 +208,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by folder IDs
if (params.folderIds) {
const folderIds = params.folderIds.split(',').filter(Boolean)
if (folderIds.length > 0) {
@@ -205,7 +215,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by triggers
if (params.triggers) {
const triggers = params.triggers.split(',').filter(Boolean)
if (triggers.length > 0 && !triggers.includes('all')) {
@@ -213,7 +222,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by date range
if (params.startDate) {
conditions = and(
conditions,
@@ -224,33 +232,79 @@ export async function GET(request: NextRequest) {
conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
}
// Filter by search query
if (params.search) {
const searchTerm = `%${params.search}%`
// With message removed, restrict search to executionId only
conditions = and(conditions, sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`)
}
// Filter by workflow name (from advanced search input)
if (params.workflowName) {
const nameTerm = `%${params.workflowName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${nameTerm}`)
}
// Filter by folder name (best-effort text match when present on workflows)
if (params.folderName) {
const folderTerm = `%${params.folderName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${folderTerm}`)
}
// Execute the query using the optimized join
if (params.executionId) {
conditions = and(conditions, eq(workflowExecutionLogs.executionId, params.executionId))
}
if (params.costOperator && params.costValue !== undefined) {
const costField = sql`(${workflowExecutionLogs.cost}->>'total')::numeric`
switch (params.costOperator) {
case '=':
conditions = and(conditions, sql`${costField} = ${params.costValue}`)
break
case '>':
conditions = and(conditions, sql`${costField} > ${params.costValue}`)
break
case '<':
conditions = and(conditions, sql`${costField} < ${params.costValue}`)
break
case '>=':
conditions = and(conditions, sql`${costField} >= ${params.costValue}`)
break
case '<=':
conditions = and(conditions, sql`${costField} <= ${params.costValue}`)
break
case '!=':
conditions = and(conditions, sql`${costField} != ${params.costValue}`)
break
}
}
if (params.durationOperator && params.durationValue !== undefined) {
const durationField = workflowExecutionLogs.totalDurationMs
switch (params.durationOperator) {
case '=':
conditions = and(conditions, eq(durationField, params.durationValue))
break
case '>':
conditions = and(conditions, gt(durationField, params.durationValue))
break
case '<':
conditions = and(conditions, lt(durationField, params.durationValue))
break
case '>=':
conditions = and(conditions, gte(durationField, params.durationValue))
break
case '<=':
conditions = and(conditions, lte(durationField, params.durationValue))
break
case '!=':
conditions = and(conditions, ne(durationField, params.durationValue))
break
}
}
const logs = await baseQuery
.where(conditions)
.orderBy(desc(workflowExecutionLogs.startedAt))
.limit(params.limit)
.offset(params.offset)
// Get total count for pagination using the same join structure
const countQuery = db
.select({ count: sql<number>`count(*)` })
.from(workflowExecutionLogs)
@@ -279,13 +333,10 @@ export async function GET(request: NextRequest) {
const count = countResult[0]?.count || 0
// Block executions are now extracted from trace spans instead of separate table
const blockExecutionsByExecution: Record<string, any[]> = {}
// Create clean trace spans from block executions
const createTraceSpans = (blockExecutions: any[]) => {
return blockExecutions.map((block, index) => {
// For error blocks, include error information in the output
let output = block.outputData
if (block.status === 'error' && block.errorMessage) {
output = {
@@ -314,7 +365,6 @@ export async function GET(request: NextRequest) {
})
}
// Extract cost information from block executions
const extractCostSummary = (blockExecutions: any[]) => {
let totalCost = 0
let totalInputCost = 0
@@ -333,7 +383,6 @@ export async function GET(request: NextRequest) {
totalPromptTokens += block.cost.tokens?.prompt || 0
totalCompletionTokens += block.cost.tokens?.completion || 0
// Track per-model costs
if (block.cost.model) {
if (!models.has(block.cost.model)) {
models.set(block.cost.model, {
@@ -363,34 +412,29 @@ export async function GET(request: NextRequest) {
prompt: totalPromptTokens,
completion: totalCompletionTokens,
},
models: Object.fromEntries(models), // Convert Map to object for JSON serialization
models: Object.fromEntries(models),
}
}
// Transform to clean log format with workflow data included
const enhancedLogs = logs.map((log) => {
const blockExecutions = blockExecutionsByExecution[log.executionId] || []
// Only process trace spans and detailed cost in full mode
let traceSpans = []
let finalOutput: any
let costSummary = (log.cost as any) || { total: 0 }
if (params.details === 'full' && log.executionData) {
// Use stored trace spans if available, otherwise create from block executions
const storedTraceSpans = (log.executionData as any)?.traceSpans
traceSpans =
storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0
? storedTraceSpans
: createTraceSpans(blockExecutions)
// Prefer stored cost JSON; otherwise synthesize from blocks
costSummary =
log.cost && Object.keys(log.cost as any).length > 0
? (log.cost as any)
: extractCostSummary(blockExecutions)
// Include finalOutput if present on executionData
try {
const fo = (log.executionData as any)?.finalOutput
if (fo !== undefined) finalOutput = fo

View File

@@ -2,11 +2,9 @@
import { useEffect, useMemo, useRef, useState } from 'react'
import { Search, X } from 'lucide-react'
import { useParams } from 'next/navigation'
import { Button, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn'
import { Badge, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import { createLogger } from '@/lib/logs/console/logger'
import { getIntegrationMetadata } from '@/lib/logs/get-trigger-options'
import { getTriggerOptions } from '@/lib/logs/get-trigger-options'
import { type ParsedFilter, parseQuery } from '@/lib/logs/query-parser'
import {
type FolderData,
@@ -18,7 +16,15 @@ import { useSearchState } from '@/app/workspace/[workspaceId]/logs/hooks/use-sea
import { useFolderStore } from '@/stores/folders/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('AutocompleteSearch')
/**
 * Shortens a filter badge value for compact display.
 *
 * ID-like fields (`executionId` / `workflowId`) longer than 12 characters
 * collapse to their last 6 characters; any other value longer than 20
 * characters is clipped to its first 17. An ellipsis marks the removed part.
 */
function truncateFilterValue(field: string, value: string): string {
  // IDs are only meaningful at their tail, so keep the suffix for those.
  const isIdField = field === 'executionId' || field === 'workflowId'
  if (isIdField && value.length > 12) {
    return '...' + value.slice(-6)
  }
  // Everything else keeps its prefix when it overflows the badge width.
  return value.length > 20 ? value.slice(0, 17) + '...' : value
}
interface AutocompleteSearchProps {
value: string
@@ -35,11 +41,8 @@ export function AutocompleteSearch({
className,
onOpenChange,
}: AutocompleteSearchProps) {
const params = useParams()
const workspaceId = params.workspaceId as string
const workflows = useWorkflowRegistry((state) => state.workflows)
const folders = useFolderStore((state) => state.folders)
const [triggersData, setTriggersData] = useState<TriggerData[]>([])
const workflowsData = useMemo<WorkflowData[]>(() => {
return Object.values(workflows).map((w) => ({
@@ -56,32 +59,13 @@ export function AutocompleteSearch({
}))
}, [folders])
useEffect(() => {
if (!workspaceId) return
const fetchTriggers = async () => {
try {
const response = await fetch(`/api/logs/triggers?workspaceId=${workspaceId}`)
if (!response.ok) return
const data = await response.json()
const triggers: TriggerData[] = data.triggers.map((trigger: string) => {
const metadata = getIntegrationMetadata(trigger)
return {
value: trigger,
label: metadata.label,
color: metadata.color,
}
})
setTriggersData(triggers)
} catch (error) {
logger.error('Failed to fetch triggers:', error)
}
}
fetchTriggers()
}, [workspaceId])
const triggersData = useMemo<TriggerData[]>(() => {
return getTriggerOptions().map((t) => ({
value: t.value,
label: t.label,
color: t.color,
}))
}, [])
const suggestionEngine = useMemo(() => {
return new SearchSuggestions(workflowsData, foldersData, triggersData)
@@ -103,7 +87,6 @@ export function AutocompleteSearch({
suggestions,
sections,
highlightedIndex,
highlightedBadgeIndex,
inputRef,
dropdownRef,
handleInputChange,
@@ -122,7 +105,6 @@ export function AutocompleteSearch({
const lastExternalValue = useRef(value)
useEffect(() => {
// Only re-initialize if value changed externally (not from user typing)
if (value !== lastExternalValue.current) {
lastExternalValue.current = value
const parsed = parseQuery(value)
@@ -130,7 +112,6 @@ export function AutocompleteSearch({
}
}, [value, initializeFromQuery])
// Initial sync on mount
useEffect(() => {
if (value) {
const parsed = parseQuery(value)
@@ -189,40 +170,49 @@ export function AutocompleteSearch({
<div className='flex flex-1 items-center gap-[6px] overflow-x-auto pr-[6px] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden'>
{/* Applied Filter Badges */}
{appliedFilters.map((filter, index) => (
<Button
<Badge
key={`${filter.field}-${filter.value}-${index}`}
variant='outline'
className={cn(
'h-6 flex-shrink-0 gap-1 rounded-[6px] px-2 text-[11px]',
highlightedBadgeIndex === index && 'border'
)}
onClick={(e) => {
e.preventDefault()
removeBadge(index)
role='button'
tabIndex={0}
className='h-6 shrink-0 cursor-pointer whitespace-nowrap rounded-md px-2 text-[11px]'
onClick={() => removeBadge(index)}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
removeBadge(index)
}
}}
>
<span className='text-[var(--text-muted)]'>{filter.field}:</span>
<span className='text-[var(--text-primary)]'>
{filter.operator !== '=' && filter.operator}
{filter.originalValue}
{truncateFilterValue(filter.field, filter.originalValue)}
</span>
<X className='h-3 w-3' />
</Button>
<X className='h-3 w-3 shrink-0' />
</Badge>
))}
{/* Text Search Badge (if present) */}
{hasTextSearch && (
<Button
<Badge
variant='outline'
className='h-6 flex-shrink-0 gap-1 rounded-[6px] px-2 text-[11px]'
onClick={(e) => {
e.preventDefault()
handleFiltersChange(appliedFilters, '')
role='button'
tabIndex={0}
className='h-6 shrink-0 cursor-pointer whitespace-nowrap rounded-md px-2 text-[11px]'
onClick={() => handleFiltersChange(appliedFilters, '')}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
handleFiltersChange(appliedFilters, '')
}
}}
>
<span className='text-[var(--text-primary)]'>"{textSearch}"</span>
<X className='h-3 w-3' />
</Button>
<span className='max-w-[150px] truncate text-[var(--text-primary)]'>
"{textSearch}"
</span>
<X className='h-3 w-3 shrink-0' />
</Badge>
)}
{/* Input - only current typing */}
@@ -261,9 +251,8 @@ export function AutocompleteSearch({
sideOffset={4}
onOpenAutoFocus={(e) => e.preventDefault()}
>
<div className='max-h-96 overflow-y-auto'>
<div className='max-h-96 overflow-y-auto px-1'>
{sections.length > 0 ? (
// Multi-section layout
<div className='py-1'>
{/* Show all results (no header) */}
{suggestions[0]?.category === 'show-all' && (
@@ -271,9 +260,9 @@ export function AutocompleteSearch({
key={suggestions[0].id}
data-index={0}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
highlightedIndex === 0 && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
highlightedIndex === 0 && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(0)}
onMouseDown={(e) => {
@@ -287,7 +276,7 @@ export function AutocompleteSearch({
{sections.map((section) => (
<div key={section.title}>
<div className='border-[var(--divider)] border-t px-3 py-1.5 font-medium text-[11px] text-[var(--text-tertiary)] uppercase tracking-wide'>
<div className='px-3 py-1.5 font-medium text-[12px] text-[var(--text-tertiary)] uppercase tracking-wide'>
{section.title}
</div>
{section.suggestions.map((suggestion) => {
@@ -301,9 +290,9 @@ export function AutocompleteSearch({
key={suggestion.id}
data-index={index}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
isHighlighted && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
isHighlighted && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(index)}
onMouseDown={(e) => {
@@ -312,19 +301,11 @@ export function AutocompleteSearch({
}}
>
<div className='flex items-center justify-between gap-3'>
<div className='flex min-w-0 flex-1 items-center gap-2'>
{suggestion.category === 'trigger' && suggestion.color && (
<div
className='h-2 w-2 flex-shrink-0 rounded-full'
style={{ backgroundColor: suggestion.color }}
/>
)}
<div className='min-w-0 flex-1 truncate text-[13px]'>
{suggestion.label}
</div>
<div className='min-w-0 flex-1 truncate text-[13px]'>
{suggestion.label}
</div>
{suggestion.value !== suggestion.label && (
<div className='flex-shrink-0 font-mono text-[11px] text-[var(--text-muted)]'>
<div className='shrink-0 font-mono text-[11px] text-[var(--text-muted)]'>
{suggestion.category === 'workflow' ||
suggestion.category === 'folder'
? `${suggestion.category}:`
@@ -342,7 +323,7 @@ export function AutocompleteSearch({
// Single section layout
<div className='py-1'>
{suggestionType === 'filters' && (
<div className='border-[var(--divider)] border-b px-3 py-1.5 font-medium text-[11px] text-[var(--text-tertiary)] uppercase tracking-wide'>
<div className='px-3 py-1.5 font-medium text-[12px] text-[var(--text-tertiary)] uppercase tracking-wide'>
SUGGESTED FILTERS
</div>
)}
@@ -352,10 +333,9 @@ export function AutocompleteSearch({
key={suggestion.id}
data-index={index}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
index === highlightedIndex &&
'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
index === highlightedIndex && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(index)}
onMouseDown={(e) => {
@@ -364,17 +344,9 @@ export function AutocompleteSearch({
}}
>
<div className='flex items-center justify-between gap-3'>
<div className='flex min-w-0 flex-1 items-center gap-2'>
{suggestion.category === 'trigger' && suggestion.color && (
<div
className='h-2 w-2 flex-shrink-0 rounded-full'
style={{ backgroundColor: suggestion.color }}
/>
)}
<div className='min-w-0 flex-1 text-[13px]'>{suggestion.label}</div>
</div>
<div className='min-w-0 flex-1 text-[13px]'>{suggestion.label}</div>
{suggestion.description && (
<div className='flex-shrink-0 text-[11px] text-[var(--text-muted)]'>
<div className='shrink-0 text-[11px] text-[var(--text-muted)]'>
{suggestion.value}
</div>
)}

View File

@@ -21,21 +21,15 @@ export function useSearchState({
const [currentInput, setCurrentInput] = useState('')
const [textSearch, setTextSearch] = useState('')
// Dropdown state
const [isOpen, setIsOpen] = useState(false)
const [suggestions, setSuggestions] = useState<Suggestion[]>([])
const [sections, setSections] = useState<SuggestionSection[]>([])
const [highlightedIndex, setHighlightedIndex] = useState(-1)
// Badge interaction
const [highlightedBadgeIndex, setHighlightedBadgeIndex] = useState<number | null>(null)
// Refs
const inputRef = useRef<HTMLInputElement>(null)
const dropdownRef = useRef<HTMLDivElement>(null)
const debounceRef = useRef<NodeJS.Timeout | null>(null)
// Update suggestions when input changes
const updateSuggestions = useCallback(
(input: string) => {
const suggestionGroup = getSuggestions(input)
@@ -55,13 +49,10 @@ export function useSearchState({
[getSuggestions]
)
// Handle input changes
const handleInputChange = useCallback(
(value: string) => {
setCurrentInput(value)
setHighlightedBadgeIndex(null) // Clear badge highlight on any input
// Debounce suggestion updates
if (debounceRef.current) {
clearTimeout(debounceRef.current)
}
@@ -73,11 +64,9 @@ export function useSearchState({
[updateSuggestions, debounceMs]
)
// Handle suggestion selection
const handleSuggestionSelect = useCallback(
(suggestion: Suggestion) => {
if (suggestion.category === 'show-all') {
// Treat as text search
setTextSearch(suggestion.value)
setCurrentInput('')
setIsOpen(false)
@@ -85,15 +74,12 @@ export function useSearchState({
return
}
// Check if this is a filter-key suggestion (ends with ':')
if (suggestion.category === 'filters' && suggestion.value.endsWith(':')) {
// Set input to the filter key and keep dropdown open for values
setCurrentInput(suggestion.value)
updateSuggestions(suggestion.value)
return
}
// For filter values, workflows, folders - add as a filter
const newFilter: ParsedFilter = {
field: suggestion.value.split(':')[0] as any,
operator: '=',
@@ -110,15 +96,12 @@ export function useSearchState({
setCurrentInput('')
setTextSearch('')
// Notify parent
onFiltersChange(updatedFilters, '')
// Focus back on input and reopen dropdown with empty suggestions
if (inputRef.current) {
inputRef.current.focus()
}
// Show filter keys dropdown again after selection
setTimeout(() => {
updateSuggestions('')
}, 50)
@@ -126,12 +109,10 @@ export function useSearchState({
[appliedFilters, onFiltersChange, updateSuggestions]
)
// Remove a badge
const removeBadge = useCallback(
(index: number) => {
const updatedFilters = appliedFilters.filter((_, i) => i !== index)
setAppliedFilters(updatedFilters)
setHighlightedBadgeIndex(null)
onFiltersChange(updatedFilters, textSearch)
if (inputRef.current) {
@@ -141,39 +122,22 @@ export function useSearchState({
[appliedFilters, textSearch, onFiltersChange]
)
// Handle keyboard navigation
const handleKeyDown = useCallback(
(event: React.KeyboardEvent) => {
// Backspace on empty input - badge deletion
if (event.key === 'Backspace' && currentInput === '') {
event.preventDefault()
if (highlightedBadgeIndex !== null) {
// Delete highlighted badge
removeBadge(highlightedBadgeIndex)
} else if (appliedFilters.length > 0) {
// Highlight last badge
setHighlightedBadgeIndex(appliedFilters.length - 1)
if (appliedFilters.length > 0) {
event.preventDefault()
removeBadge(appliedFilters.length - 1)
}
return
}
// Clear badge highlight on any other key when not in dropdown navigation
if (
highlightedBadgeIndex !== null &&
!['ArrowDown', 'ArrowUp', 'Enter'].includes(event.key)
) {
setHighlightedBadgeIndex(null)
}
// Enter key
if (event.key === 'Enter') {
event.preventDefault()
if (isOpen && highlightedIndex >= 0 && suggestions[highlightedIndex]) {
handleSuggestionSelect(suggestions[highlightedIndex])
} else if (currentInput.trim()) {
// Submit current input as text search
setTextSearch(currentInput.trim())
setCurrentInput('')
setIsOpen(false)
@@ -182,7 +146,6 @@ export function useSearchState({
return
}
// Dropdown navigation
if (!isOpen) return
switch (event.key) {
@@ -216,7 +179,6 @@ export function useSearchState({
},
[
currentInput,
highlightedBadgeIndex,
appliedFilters,
isOpen,
highlightedIndex,
@@ -227,12 +189,10 @@ export function useSearchState({
]
)
// Handle focus
const handleFocus = useCallback(() => {
updateSuggestions(currentInput)
}, [currentInput, updateSuggestions])
// Handle blur
const handleBlur = useCallback(() => {
setTimeout(() => {
setIsOpen(false)
@@ -240,7 +200,6 @@ export function useSearchState({
}, 150)
}, [])
// Clear all filters
const clearAll = useCallback(() => {
setAppliedFilters([])
setCurrentInput('')
@@ -253,7 +212,6 @@ export function useSearchState({
}
}, [onFiltersChange])
// Initialize from external value (URL params, etc.)
const initializeFromQuery = useCallback((query: string, filters: ParsedFilter[]) => {
setAppliedFilters(filters)
setTextSearch(query)
@@ -261,7 +219,6 @@ export function useSearchState({
}, [])
return {
// State
appliedFilters,
currentInput,
textSearch,
@@ -269,13 +226,10 @@ export function useSearchState({
suggestions,
sections,
highlightedIndex,
highlightedBadgeIndex,
// Refs
inputRef,
dropdownRef,
// Handlers
handleInputChange,
handleSuggestionSelect,
handleKeyDown,
@@ -285,7 +239,6 @@ export function useSearchState({
clearAll,
initializeFromQuery,
// Setters for external control
setHighlightedIndex,
}
}

View File

@@ -91,8 +91,7 @@ export function FieldFormat({
placeholder = 'fieldName',
showType = true,
showValue = false,
valuePlaceholder = 'Enter test value',
config,
valuePlaceholder = 'Enter default value',
}: FieldFormatProps) {
const [storeValue, setStoreValue] = useSubBlockValue<Field[]>(blockId, subBlockId)
const valueInputRefs = useRef<Record<string, HTMLInputElement | HTMLTextAreaElement>>({})
@@ -454,7 +453,6 @@ export function FieldFormat({
)
}
// Export specific components for backward compatibility
export function InputFormat(props: Omit<FieldFormatProps, 'title' | 'placeholder'>) {
return <FieldFormat {...props} title='Input' placeholder='firstName' />
}

View File

@@ -26,7 +26,7 @@ const SUBFLOW_CONFIG = {
},
typeKey: 'loopType' as const,
storeKey: 'loops' as const,
maxIterations: 100,
maxIterations: 1000,
configKeys: {
iterations: 'iterations' as const,
items: 'forEachItems' as const,

View File

@@ -655,6 +655,7 @@ export function useWorkflowExecution() {
setExecutor,
setPendingBlocks,
setActiveBlocks,
workflows,
]
)

View File

@@ -155,6 +155,15 @@ export const ScheduleBlock: BlockConfig = {
condition: { field: 'scheduleType', value: ['minutes', 'hourly'], not: true },
},
{
id: 'inputFormat',
title: 'Input Format',
type: 'input-format',
description:
'Define input parameters that will be available when the schedule triggers. Use Value to set default values for scheduled executions.',
mode: 'trigger',
},
{
id: 'scheduleSave',
type: 'schedule-save',

View File

@@ -1540,7 +1540,7 @@ export function useCollaborativeWorkflow() {
const config = {
id: nodeId,
nodes: childNodes,
iterations: Math.max(1, Math.min(100, count)), // Clamp between 1-100 for loops
iterations: Math.max(1, Math.min(1000, count)), // Clamp between 1-1000 for loops
loopType: currentLoopType,
forEachItems: currentCollection,
}

View File

@@ -23,6 +23,8 @@ const FILTER_FIELDS = {
workflow: 'string',
trigger: 'string',
execution: 'string',
executionId: 'string',
workflowId: 'string',
id: 'string',
cost: 'number',
duration: 'number',
@@ -215,11 +217,13 @@ export function queryToApiParams(parsedQuery: ParsedQuery): Record<string, strin
break
case 'cost':
params[`cost_${filter.operator}_${filter.value}`] = 'true'
params.costOperator = filter.operator
params.costValue = String(filter.value)
break
case 'duration':
params[`duration_${filter.operator}_${filter.value}`] = 'true'
params.durationOperator = filter.operator
params.durationValue = String(filter.value)
break
}
}

View File

@@ -38,8 +38,6 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [
{ value: 'info', label: 'Info', description: 'Info logs only' },
],
},
// Note: Trigger options are now dynamically populated from active logs
// Core types are included by default, integration triggers are added from actual log data
{
key: 'cost',
label: 'Cost',
@@ -82,14 +80,6 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [
},
]
const CORE_TRIGGERS: TriggerData[] = [
{ value: 'api', label: 'API', color: '#3b82f6' },
{ value: 'manual', label: 'Manual', color: '#6b7280' },
{ value: 'webhook', label: 'Webhook', color: '#f97316' },
{ value: 'chat', label: 'Chat', color: '#8b5cf6' },
{ value: 'schedule', label: 'Schedule', color: '#10b981' },
]
export class SearchSuggestions {
private workflowsData: WorkflowData[]
private foldersData: FolderData[]
@@ -116,10 +106,10 @@ export class SearchSuggestions {
}
/**
* Get all triggers (core + integrations)
* Get all triggers from registry data
*/
private getAllTriggers(): TriggerData[] {
return [...CORE_TRIGGERS, ...this.triggersData]
return this.triggersData
}
/**
@@ -128,24 +118,20 @@ export class SearchSuggestions {
getSuggestions(input: string): SuggestionGroup | null {
const trimmed = input.trim()
// Empty input → show all filter keys
if (!trimmed) {
return this.getFilterKeysList()
}
// Input ends with ':' → show values for that key
if (trimmed.endsWith(':')) {
const key = trimmed.slice(0, -1)
return this.getFilterValues(key)
}
// Input contains ':' → filter value context
if (trimmed.includes(':')) {
const [key, partial] = trimmed.split(':')
return this.getFilterValues(key, partial)
}
// Plain text → multi-section results
return this.getMultiSectionResults(trimmed)
}
@@ -155,7 +141,6 @@ export class SearchSuggestions {
private getFilterKeysList(): SuggestionGroup {
const suggestions: Suggestion[] = []
// Add all filter keys
for (const filter of FILTER_DEFINITIONS) {
suggestions.push({
id: `filter-key-${filter.key}`,
@@ -166,7 +151,6 @@ export class SearchSuggestions {
})
}
// Add trigger key (always available - core types + integrations)
suggestions.push({
id: 'filter-key-trigger',
value: 'trigger:',
@@ -175,7 +159,6 @@ export class SearchSuggestions {
category: 'filters',
})
// Add workflow and folder keys
if (this.workflowsData.length > 0) {
suggestions.push({
id: 'filter-key-workflow',
@@ -249,12 +232,10 @@ export class SearchSuggestions {
: null
}
// Trigger filter values (core + integrations)
if (key === 'trigger') {
const allTriggers = this.getAllTriggers()
const suggestions = allTriggers
.filter((t) => !partial || t.label.toLowerCase().includes(partial.toLowerCase()))
.slice(0, 15) // Show more since we have core + integrations
.map((t) => ({
id: `filter-value-trigger-${t.value}`,
value: `trigger:${t.value}`,
@@ -273,11 +254,9 @@ export class SearchSuggestions {
: null
}
// Workflow filter values
if (key === 'workflow') {
const suggestions = this.workflowsData
.filter((w) => !partial || w.name.toLowerCase().includes(partial.toLowerCase()))
.slice(0, 8)
.map((w) => ({
id: `filter-value-workflow-${w.id}`,
value: `workflow:"${w.name}"`,
@@ -295,11 +274,9 @@ export class SearchSuggestions {
: null
}
// Folder filter values
if (key === 'folder') {
const suggestions = this.foldersData
.filter((f) => !partial || f.name.toLowerCase().includes(partial.toLowerCase()))
.slice(0, 8)
.map((f) => ({
id: `filter-value-folder-${f.id}`,
value: `folder:"${f.name}"`,
@@ -326,7 +303,6 @@ export class SearchSuggestions {
const sections: Array<{ title: string; suggestions: Suggestion[] }> = []
const allSuggestions: Suggestion[] = []
// Show all results option
const showAllSuggestion: Suggestion = {
id: 'show-all',
value: query,
@@ -335,7 +311,6 @@ export class SearchSuggestions {
}
allSuggestions.push(showAllSuggestion)
// Match filter values (e.g., "info" → "Status: Info")
const matchingFilterValues = this.getMatchingFilterValues(query)
if (matchingFilterValues.length > 0) {
sections.push({
@@ -345,7 +320,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingFilterValues)
}
// Match triggers
const matchingTriggers = this.getMatchingTriggers(query)
if (matchingTriggers.length > 0) {
sections.push({
@@ -355,7 +329,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingTriggers)
}
// Match workflows
const matchingWorkflows = this.getMatchingWorkflows(query)
if (matchingWorkflows.length > 0) {
sections.push({
@@ -365,7 +338,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingWorkflows)
}
// Match folders
const matchingFolders = this.getMatchingFolders(query)
if (matchingFolders.length > 0) {
sections.push({
@@ -375,7 +347,6 @@ export class SearchSuggestions {
allSuggestions.push(...matchingFolders)
}
// Add filter keys if no specific matches
if (
matchingFilterValues.length === 0 &&
matchingTriggers.length === 0 &&

View File

@@ -18,11 +18,70 @@ import { executeTool } from '@/tools'
const logger = createLogger('AzureOpenAIProvider')
/**
 * Determines whether the given Azure OpenAI API version should use the
 * Responses API (2025 and later) or the legacy Chat Completions API
 * (2024 and earlier).
 *
 * Parses the leading four-digit year of the version string (e.g.
 * "2025-03-01-preview") and compares it numerically, so future versions
 * (2026+) also route to the Responses API instead of silently falling
 * back to Chat Completions as a bare `startsWith('2025-')` check would.
 * Unparseable version strings conservatively select Chat Completions.
 */
function useResponsesApi(apiVersion: string): boolean {
  const match = /^(\d{4})-/.exec(apiVersion)
  if (!match) return false
  return Number(match[1]) >= 2025
}
/**
 * Converts an Azure OpenAI Responses API event stream into a standard
 * ReadableStream of UTF-8 encoded text chunks, while collecting completion
 * metrics.
 *
 * Text deltas arrive either as `response.output_text.delta` events (text on
 * `event.delta`) or `response.content_part.delta` events (text on
 * `event.delta.text`); both are enqueued as they arrive. Usage data is taken
 * from any event's top-level `usage` field and overridden by the terminal
 * `response.completed` / `response.done` event's `response.usage` when present.
 *
 * @param responsesStream - Async iterable of Responses API stream events
 * @param onComplete - Optional callback invoked once, after the stream ends,
 *                     with the accumulated text and the last-seen usage data
 * @returns A ReadableStream emitting the encoded text deltas
 */
function createReadableStreamFromResponsesApiStream(
  responsesStream: any,
  onComplete?: (content: string, usage?: any) => void
): ReadableStream {
  let fullContent = ''
  let usageData: any = null
  // Hoisted so a new encoder is not allocated for every delta chunk
  const encoder = new TextEncoder()
  return new ReadableStream({
    async start(controller) {
      try {
        for await (const event of responsesStream) {
          // Some event shapes carry usage at the top level
          if (event.usage) {
            usageData = event.usage
          }
          // Extract the text delta regardless of which event shape carried it
          const content =
            event.type === 'response.output_text.delta'
              ? event.delta || ''
              : event.type === 'response.content_part.delta'
                ? event.delta?.text || ''
                : ''
          if (content) {
            fullContent += content
            controller.enqueue(encoder.encode(content))
          }
          // Terminal events carry the authoritative usage totals
          if (event.type === 'response.completed' || event.type === 'response.done') {
            if (event.response?.usage) {
              usageData = event.response.usage
            }
          }
        }
        if (onComplete) {
          onComplete(fullContent, usageData)
        }
        controller.close()
      } catch (error) {
        controller.error(error)
      }
    },
  })
}
/**
* Helper function to convert an Azure OpenAI stream to a standard ReadableStream
* and collect completion metrics
*/
function createReadableStreamFromAzureOpenAIStream(
function createReadableStreamFromChatCompletionsStream(
azureOpenAIStream: any,
onComplete?: (content: string, usage?: any) => void
): ReadableStream {
@@ -33,7 +92,6 @@ function createReadableStreamFromAzureOpenAIStream(
async start(controller) {
try {
for await (const chunk of azureOpenAIStream) {
// Check for usage data in the final chunk
if (chunk.usage) {
usageData = chunk.usage
}
@@ -45,7 +103,6 @@ function createReadableStreamFromAzureOpenAIStream(
}
}
// Once stream is complete, call the completion callback with the final content and usage
if (onComplete) {
onComplete(fullContent, usageData)
}
@@ -58,6 +115,430 @@ function createReadableStreamFromAzureOpenAIStream(
})
}
/**
 * Executes a request against Azure OpenAI using the Responses API
 * (selected for 2025+ API versions).
 *
 * Three execution paths:
 *  - direct streaming when streaming is requested and no tools are configured,
 *  - an iterative tool-calling loop (bounded by MAX_ITERATIONS) that executes
 *    tools locally and feeds their outputs back as `function_call_output`
 *    input items,
 *  - a final streamed or non-streamed response after tool processing.
 *
 * @param azureOpenAI - Configured AzureOpenAI client
 * @param request - Provider request (messages, tools, streaming flag, etc.)
 * @param deploymentName - Azure deployment name used as the model identifier
 * @param providerStartTime - Epoch ms when provider execution began
 * @param providerStartTimeISO - ISO form of providerStartTime for timing metadata
 * @returns A ProviderResponse, or a StreamingExecution when streaming applies
 * @throws Error (with a `timing` property attached) wrapping any API failure
 */
async function executeWithResponsesApi(
  azureOpenAI: AzureOpenAI,
  request: ProviderRequest,
  deploymentName: string,
  providerStartTime: number,
  providerStartTimeISO: string
): Promise<ProviderResponse | StreamingExecution> {
  // Build the Responses API `input` array from optional context plus messages
  const inputMessages: any[] = []
  if (request.context) {
    inputMessages.push({
      role: 'user',
      content: request.context,
    })
  }
  if (request.messages) {
    inputMessages.push(...request.messages)
  }
  // Map provider tools into function-tool definitions for the API
  const tools = request.tools?.length
    ? request.tools.map((tool) => ({
        type: 'function' as const,
        function: {
          name: tool.id,
          description: tool.description,
          parameters: tool.parameters,
        },
      }))
    : undefined
  const payload: any = {
    model: deploymentName,
    // Fall back to the system prompt as plain input when no messages exist
    input: inputMessages.length > 0 ? inputMessages : request.systemPrompt || '',
  }
  // The Responses API takes the system prompt via `instructions`
  if (request.systemPrompt) {
    payload.instructions = request.systemPrompt
  }
  if (request.temperature !== undefined) payload.temperature = request.temperature
  if (request.maxTokens !== undefined) payload.max_output_tokens = request.maxTokens
  if (request.reasoningEffort !== undefined) {
    payload.reasoning = { effort: request.reasoningEffort }
  }
  // Structured output: translate responseFormat into the `text.format` schema
  if (request.responseFormat) {
    payload.text = {
      format: {
        type: 'json_schema',
        json_schema: {
          name: request.responseFormat.name || 'response_schema',
          schema: request.responseFormat.schema || request.responseFormat,
          strict: request.responseFormat.strict !== false,
        },
      },
    }
    logger.info('Added JSON schema text format to Responses API request')
  }
  if (tools?.length) {
    payload.tools = tools
    // A single forced tool is named explicitly; multiple forced tools use
    // 'required' so the model must call some tool; otherwise 'auto'
    const forcedTools = request.tools?.filter((t) => t.usageControl === 'force') || []
    if (forcedTools.length > 0) {
      if (forcedTools.length === 1) {
        payload.tool_choice = {
          type: 'function',
          function: { name: forcedTools[0].id },
        }
      } else {
        payload.tool_choice = 'required'
      }
    } else {
      payload.tool_choice = 'auto'
    }
    logger.info('Responses API request configuration:', {
      toolCount: tools.length,
      model: deploymentName,
    })
  }
  try {
    // Path 1: direct streaming — only safe when there are no tools to run
    if (request.stream && (!tools || tools.length === 0)) {
      logger.info('Using streaming response for Responses API request')
      const streamResponse = await (azureOpenAI as any).responses.create({
        ...payload,
        stream: true,
      })
      const tokenUsage = {
        prompt: 0,
        completion: 0,
        total: 0,
      }
      // The stream-completion callback mutates streamingResult after the
      // consumer drains the stream, filling in content, timing, and tokens
      const streamingResult = {
        stream: createReadableStreamFromResponsesApiStream(streamResponse, (content, usage) => {
          streamingResult.execution.output.content = content
          const streamEndTime = Date.now()
          const streamEndTimeISO = new Date(streamEndTime).toISOString()
          if (streamingResult.execution.output.providerTiming) {
            streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
            streamingResult.execution.output.providerTiming.duration =
              streamEndTime - providerStartTime
            if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
              streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
                streamEndTime
              streamingResult.execution.output.providerTiming.timeSegments[0].duration =
                streamEndTime - providerStartTime
            }
          }
          // Accept either Responses-style or Chat-style usage field names
          if (usage) {
            streamingResult.execution.output.tokens = {
              prompt: usage.input_tokens || usage.prompt_tokens || 0,
              completion: usage.output_tokens || usage.completion_tokens || 0,
              total:
                (usage.input_tokens || usage.prompt_tokens || 0) +
                (usage.output_tokens || usage.completion_tokens || 0),
            }
          }
        }),
        execution: {
          success: true,
          output: {
            content: '',
            model: request.model,
            tokens: tokenUsage,
            toolCalls: undefined,
            providerTiming: {
              startTime: providerStartTimeISO,
              endTime: new Date().toISOString(),
              duration: Date.now() - providerStartTime,
              timeSegments: [
                {
                  type: 'model',
                  name: 'Streaming response',
                  startTime: providerStartTime,
                  endTime: Date.now(),
                  duration: Date.now() - providerStartTime,
                },
              ],
            },
          },
          logs: [],
          metadata: {
            startTime: providerStartTimeISO,
            endTime: new Date().toISOString(),
            duration: Date.now() - providerStartTime,
          },
        },
      } as StreamingExecution
      return streamingResult
    }
    // Path 2: non-streaming initial call, possibly followed by tool iterations
    const initialCallTime = Date.now()
    let currentResponse = await (azureOpenAI as any).responses.create(payload)
    const firstResponseTime = Date.now() - initialCallTime
    let content = currentResponse.output_text || ''
    const tokens = {
      prompt: currentResponse.usage?.input_tokens || 0,
      completion: currentResponse.usage?.output_tokens || 0,
      total:
        (currentResponse.usage?.input_tokens || 0) + (currentResponse.usage?.output_tokens || 0),
    }
    const toolCalls: any[] = []
    const toolResults: any[] = []
    let iterationCount = 0
    const MAX_ITERATIONS = 10
    // Track time spent in model calls vs local tool execution
    let modelTime = firstResponseTime
    let toolsTime = 0
    const timeSegments: TimeSegment[] = [
      {
        type: 'model',
        name: 'Initial response',
        startTime: initialCallTime,
        endTime: initialCallTime + firstResponseTime,
        duration: firstResponseTime,
      },
    ]
    // Tool loop: run requested tools, append outputs, re-ask the model
    while (iterationCount < MAX_ITERATIONS) {
      const toolCallsInResponse =
        currentResponse.output?.filter((item: any) => item.type === 'function_call') || []
      if (toolCallsInResponse.length === 0) {
        break
      }
      logger.info(
        `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
      )
      const toolsStartTime = Date.now()
      for (const toolCall of toolCallsInResponse) {
        try {
          const toolName = toolCall.name
          // Arguments may arrive serialized or already parsed
          const toolArgs =
            typeof toolCall.arguments === 'string'
              ? JSON.parse(toolCall.arguments)
              : toolCall.arguments
          // Silently skip calls to tools the request does not define
          const tool = request.tools?.find((t) => t.id === toolName)
          if (!tool) continue
          const toolCallStartTime = Date.now()
          const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request)
          const result = await executeTool(toolName, executionParams, true)
          const toolCallEndTime = Date.now()
          const toolCallDuration = toolCallEndTime - toolCallStartTime
          timeSegments.push({
            type: 'tool',
            name: toolName,
            startTime: toolCallStartTime,
            endTime: toolCallEndTime,
            duration: toolCallDuration,
          })
          // Failures are reported back to the model as structured errors
          // rather than aborting the loop
          let resultContent: any
          if (result.success) {
            toolResults.push(result.output)
            resultContent = result.output
          } else {
            resultContent = {
              error: true,
              message: result.error || 'Tool execution failed',
              tool: toolName,
            }
          }
          toolCalls.push({
            name: toolName,
            arguments: toolParams,
            startTime: new Date(toolCallStartTime).toISOString(),
            endTime: new Date(toolCallEndTime).toISOString(),
            duration: toolCallDuration,
            result: resultContent,
            success: result.success,
          })
          // Add function call output to input for next request
          // NOTE(review): the model's function_call items themselves are not
          // appended to inputMessages before these outputs — confirm the
          // Responses API accepts `function_call_output` items without the
          // matching `function_call` items in the input.
          inputMessages.push({
            type: 'function_call_output',
            call_id: toolCall.call_id || toolCall.id,
            output: JSON.stringify(resultContent),
          })
        } catch (error) {
          logger.error('Error processing tool call:', {
            error,
            toolName: toolCall?.name,
          })
        }
      }
      const thisToolsTime = Date.now() - toolsStartTime
      toolsTime += thisToolsTime
      // Make the next request; tool_choice drops to 'auto' after the first
      // round so the model is no longer forced to call tools
      const nextModelStartTime = Date.now()
      const nextPayload = {
        ...payload,
        input: inputMessages,
        tool_choice: 'auto',
      }
      currentResponse = await (azureOpenAI as any).responses.create(nextPayload)
      const nextModelEndTime = Date.now()
      const thisModelTime = nextModelEndTime - nextModelStartTime
      timeSegments.push({
        type: 'model',
        name: `Model response (iteration ${iterationCount + 1})`,
        startTime: nextModelStartTime,
        endTime: nextModelEndTime,
        duration: thisModelTime,
      })
      modelTime += thisModelTime
      // Update content — the latest text response wins
      if (currentResponse.output_text) {
        content = currentResponse.output_text
      }
      // Update token counts (accumulated across iterations)
      if (currentResponse.usage) {
        tokens.prompt += currentResponse.usage.input_tokens || 0
        tokens.completion += currentResponse.usage.output_tokens || 0
        tokens.total = tokens.prompt + tokens.completion
      }
      iterationCount++
    }
    // Path 3: streaming was requested but tools ran first — stream the final
    // response now that tool outputs are in the input
    if (request.stream) {
      logger.info('Using streaming for final response after tool processing (Responses API)')
      const streamingPayload = {
        ...payload,
        input: inputMessages,
        tool_choice: 'auto',
        stream: true,
      }
      const streamResponse = await (azureOpenAI as any).responses.create(streamingPayload)
      const streamingResult = {
        stream: createReadableStreamFromResponsesApiStream(streamResponse, (content, usage) => {
          streamingResult.execution.output.content = content
          // Prefer stream-reported usage, falling back to accumulated totals
          if (usage) {
            streamingResult.execution.output.tokens = {
              prompt: usage.input_tokens || tokens.prompt,
              completion: usage.output_tokens || tokens.completion,
              total:
                (usage.input_tokens || tokens.prompt) + (usage.output_tokens || tokens.completion),
            }
          }
        }),
        execution: {
          success: true,
          output: {
            content: '',
            model: request.model,
            tokens: {
              prompt: tokens.prompt,
              completion: tokens.completion,
              total: tokens.total,
            },
            toolCalls:
              toolCalls.length > 0
                ? {
                    list: toolCalls,
                    count: toolCalls.length,
                  }
                : undefined,
            providerTiming: {
              startTime: providerStartTimeISO,
              endTime: new Date().toISOString(),
              duration: Date.now() - providerStartTime,
              modelTime: modelTime,
              toolsTime: toolsTime,
              firstResponseTime: firstResponseTime,
              iterations: iterationCount + 1,
              timeSegments: timeSegments,
            },
          },
          logs: [],
          metadata: {
            startTime: providerStartTimeISO,
            endTime: new Date().toISOString(),
            duration: Date.now() - providerStartTime,
          },
        },
      } as StreamingExecution
      return streamingResult
    }
    // Non-streaming return: compute overall timing and assemble the response
    const providerEndTime = Date.now()
    const providerEndTimeISO = new Date(providerEndTime).toISOString()
    const totalDuration = providerEndTime - providerStartTime
    return {
      content,
      model: request.model,
      tokens,
      toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
      toolResults: toolResults.length > 0 ? toolResults : undefined,
      timing: {
        startTime: providerStartTimeISO,
        endTime: providerEndTimeISO,
        duration: totalDuration,
        modelTime: modelTime,
        toolsTime: toolsTime,
        firstResponseTime: firstResponseTime,
        iterations: iterationCount + 1,
        timeSegments: timeSegments,
      },
    }
  } catch (error) {
    // Wrap the failure so callers still receive timing information
    const providerEndTime = Date.now()
    const providerEndTimeISO = new Date(providerEndTime).toISOString()
    const totalDuration = providerEndTime - providerStartTime
    logger.error('Error in Responses API request:', {
      error,
      duration: totalDuration,
    })
    const enhancedError = new Error(error instanceof Error ? error.message : String(error))
    // @ts-ignore - Adding timing property to the error
    enhancedError.timing = {
      startTime: providerStartTimeISO,
      endTime: providerEndTimeISO,
      duration: totalDuration,
    }
    throw enhancedError
  }
}
/**
* Azure OpenAI provider configuration
*/
@@ -85,8 +566,7 @@ export const azureOpenAIProvider: ProviderConfig = {
// Extract Azure-specific configuration from request or environment
// Priority: request parameters > environment variables
const azureEndpoint = request.azureEndpoint || env.AZURE_OPENAI_ENDPOINT
const azureApiVersion =
request.azureApiVersion || env.AZURE_OPENAI_API_VERSION || '2024-07-01-preview'
const azureApiVersion = request.azureApiVersion || env.AZURE_OPENAI_API_VERSION || '2024-10-21'
if (!azureEndpoint) {
throw new Error(
@@ -101,6 +581,34 @@ export const azureOpenAIProvider: ProviderConfig = {
endpoint: azureEndpoint,
})
// Build deployment name - use deployment name instead of model name
const deploymentName = (request.model || 'azure/gpt-4o').replace('azure/', '')
// Start execution timer for the entire provider execution
const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()
// Check if we should use the Responses API (2025+ versions)
if (useResponsesApi(azureApiVersion)) {
logger.info('Using Responses API for Azure OpenAI request', {
apiVersion: azureApiVersion,
model: deploymentName,
})
return executeWithResponsesApi(
azureOpenAI,
request,
deploymentName,
providerStartTime,
providerStartTimeISO
)
}
// Continue with Chat Completions API for 2024 and earlier versions
logger.info('Using Chat Completions API for Azure OpenAI request', {
apiVersion: azureApiVersion,
model: deploymentName,
})
// Start with an empty array for all messages
const allMessages = []
@@ -137,8 +645,7 @@ export const azureOpenAIProvider: ProviderConfig = {
}))
: undefined
// Build the request payload - use deployment name instead of model name
const deploymentName = (request.model || 'azure/gpt-4o').replace('azure/', '')
// Build the request payload
const payload: any = {
model: deploymentName, // Azure OpenAI uses deployment name
messages: allMessages,
@@ -195,23 +702,16 @@ export const azureOpenAIProvider: ProviderConfig = {
}
}
// Start execution timer for the entire provider execution
const providerStartTime = Date.now()
const providerStartTimeISO = new Date(providerStartTime).toISOString()
try {
// Check if we can stream directly (no tools required)
if (request.stream && (!tools || tools.length === 0)) {
logger.info('Using streaming response for Azure OpenAI request')
// Create a streaming request with token usage tracking
const streamResponse = await azureOpenAI.chat.completions.create({
...payload,
stream: true,
stream_options: { include_usage: true },
})
// Start collecting token usage from the stream
const tokenUsage = {
prompt: 0,
completion: 0,
@@ -220,47 +720,44 @@ export const azureOpenAIProvider: ProviderConfig = {
let _streamContent = ''
// Create a StreamingExecution response with a callback to update content and tokens
const streamingResult = {
stream: createReadableStreamFromAzureOpenAIStream(streamResponse, (content, usage) => {
// Update the execution data with the final content and token usage
_streamContent = content
streamingResult.execution.output.content = content
stream: createReadableStreamFromChatCompletionsStream(
streamResponse,
(content, usage) => {
_streamContent = content
streamingResult.execution.output.content = content
// Update the timing information with the actual completion time
const streamEndTime = Date.now()
const streamEndTimeISO = new Date(streamEndTime).toISOString()
const streamEndTime = Date.now()
const streamEndTimeISO = new Date(streamEndTime).toISOString()
if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
// Update the time segment as well
if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
streamEndTime
streamingResult.execution.output.providerTiming.timeSegments[0].duration =
if (streamingResult.execution.output.providerTiming) {
streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO
streamingResult.execution.output.providerTiming.duration =
streamEndTime - providerStartTime
}
}
// Update token usage if available from the stream
if (usage) {
const newTokens = {
prompt: usage.prompt_tokens || tokenUsage.prompt,
completion: usage.completion_tokens || tokenUsage.completion,
total: usage.total_tokens || tokenUsage.total,
if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) {
streamingResult.execution.output.providerTiming.timeSegments[0].endTime =
streamEndTime
streamingResult.execution.output.providerTiming.timeSegments[0].duration =
streamEndTime - providerStartTime
}
}
streamingResult.execution.output.tokens = newTokens
if (usage) {
const newTokens = {
prompt: usage.prompt_tokens || tokenUsage.prompt,
completion: usage.completion_tokens || tokenUsage.completion,
total: usage.total_tokens || tokenUsage.total,
}
streamingResult.execution.output.tokens = newTokens
}
}
// We don't need to estimate tokens here as logger.ts will handle that
}),
),
execution: {
success: true,
output: {
content: '', // Will be filled by the stream completion callback
content: '',
model: request.model,
tokens: tokenUsage,
toolCalls: undefined,
@@ -278,9 +775,8 @@ export const azureOpenAIProvider: ProviderConfig = {
},
],
},
// Cost will be calculated in logger
},
logs: [], // No block logs for direct streaming
logs: [],
metadata: {
startTime: providerStartTimeISO,
endTime: new Date().toISOString(),
@@ -289,21 +785,16 @@ export const azureOpenAIProvider: ProviderConfig = {
},
} as StreamingExecution
// Return the streaming execution object with explicit casting
return streamingResult as StreamingExecution
}
// Make the initial API request
const initialCallTime = Date.now()
// Track the original tool_choice for forced tool tracking
const originalToolChoice = payload.tool_choice
// Track forced tools and their usage
const forcedTools = preparedTools?.forcedTools || []
let usedForcedTools: string[] = []
// Helper function to check for forced tool usage in responses
const checkForForcedToolUsage = (
response: any,
toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }
@@ -327,7 +818,6 @@ export const azureOpenAIProvider: ProviderConfig = {
const firstResponseTime = Date.now() - initialCallTime
let content = currentResponse.choices[0]?.message?.content || ''
// Collect token information but don't calculate costs - that will be done in logger.ts
const tokens = {
prompt: currentResponse.usage?.prompt_tokens || 0,
completion: currentResponse.usage?.completion_tokens || 0,
@@ -337,16 +827,13 @@ export const azureOpenAIProvider: ProviderConfig = {
const toolResults = []
const currentMessages = [...allMessages]
let iterationCount = 0
const MAX_ITERATIONS = 10 // Prevent infinite loops
const MAX_ITERATIONS = 10
// Track time spent in model vs tools
let modelTime = firstResponseTime
let toolsTime = 0
// Track if a forced tool has been used
let hasUsedForcedTool = false
// Track each model and tool call segment with timestamps
const timeSegments: TimeSegment[] = [
{
type: 'model',
@@ -357,11 +844,9 @@ export const azureOpenAIProvider: ProviderConfig = {
},
]
// Check if a forced tool was used in the first response
checkForForcedToolUsage(currentResponse, originalToolChoice)
while (iterationCount < MAX_ITERATIONS) {
// Check for tool calls
const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
if (!toolCallsInResponse || toolCallsInResponse.length === 0) {
break
@@ -371,20 +856,16 @@ export const azureOpenAIProvider: ProviderConfig = {
`Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})`
)
// Track time for tool calls in this batch
const toolsStartTime = Date.now()
// Process each tool call
for (const toolCall of toolCallsInResponse) {
try {
const toolName = toolCall.function.name
const toolArgs = JSON.parse(toolCall.function.arguments)
// Get the tool from the tools registry
const tool = request.tools?.find((t) => t.id === toolName)
if (!tool) continue
// Execute the tool
const toolCallStartTime = Date.now()
const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request)
@@ -393,7 +874,6 @@ export const azureOpenAIProvider: ProviderConfig = {
const toolCallEndTime = Date.now()
const toolCallDuration = toolCallEndTime - toolCallStartTime
// Add to time segments for both success and failure
timeSegments.push({
type: 'tool',
name: toolName,
@@ -402,13 +882,11 @@ export const azureOpenAIProvider: ProviderConfig = {
duration: toolCallDuration,
})
// Prepare result content for the LLM
let resultContent: any
if (result.success) {
toolResults.push(result.output)
resultContent = result.output
} else {
// Include error information so LLM can respond appropriately
resultContent = {
error: true,
message: result.error || 'Tool execution failed',
@@ -426,7 +904,6 @@ export const azureOpenAIProvider: ProviderConfig = {
success: result.success,
})
// Add the tool call and result to messages (both success and failure)
currentMessages.push({
role: 'assistant',
content: null,
@@ -455,48 +932,38 @@ export const azureOpenAIProvider: ProviderConfig = {
}
}
// Calculate tool call time for this iteration
const thisToolsTime = Date.now() - toolsStartTime
toolsTime += thisToolsTime
// Make the next request with updated messages
const nextPayload = {
...payload,
messages: currentMessages,
}
// Update tool_choice based on which forced tools have been used
if (typeof originalToolChoice === 'object' && hasUsedForcedTool && forcedTools.length > 0) {
// If we have remaining forced tools, get the next one to force
const remainingTools = forcedTools.filter((tool) => !usedForcedTools.includes(tool))
if (remainingTools.length > 0) {
// Force the next tool
nextPayload.tool_choice = {
type: 'function',
function: { name: remainingTools[0] },
}
logger.info(`Forcing next tool: ${remainingTools[0]}`)
} else {
// All forced tools have been used, switch to auto
nextPayload.tool_choice = 'auto'
logger.info('All forced tools have been used, switching to auto tool_choice')
}
}
// Time the next model call
const nextModelStartTime = Date.now()
// Make the next request
currentResponse = await azureOpenAI.chat.completions.create(nextPayload)
// Check if any forced tools were used in this response
checkForForcedToolUsage(currentResponse, nextPayload.tool_choice)
const nextModelEndTime = Date.now()
const thisModelTime = nextModelEndTime - nextModelStartTime
// Add to time segments
timeSegments.push({
type: 'model',
name: `Model response (iteration ${iterationCount + 1})`,
@@ -505,15 +972,12 @@ export const azureOpenAIProvider: ProviderConfig = {
duration: thisModelTime,
})
// Add to model time
modelTime += thisModelTime
// Update content if we have a text response
if (currentResponse.choices[0]?.message?.content) {
content = currentResponse.choices[0].message.content
}
// Update token counts
if (currentResponse.usage) {
tokens.prompt += currentResponse.usage.prompt_tokens || 0
tokens.completion += currentResponse.usage.completion_tokens || 0
@@ -523,46 +987,43 @@ export const azureOpenAIProvider: ProviderConfig = {
iterationCount++
}
// After all tool processing complete, if streaming was requested, use streaming for the final response
if (request.stream) {
logger.info('Using streaming for final response after tool processing')
// When streaming after tool calls with forced tools, make sure tool_choice is set to 'auto'
// This prevents Azure OpenAI API from trying to force tool usage again in the final streaming response
const streamingPayload = {
...payload,
messages: currentMessages,
tool_choice: 'auto', // Always use 'auto' for the streaming response after tool calls
tool_choice: 'auto',
stream: true,
stream_options: { include_usage: true },
}
const streamResponse = await azureOpenAI.chat.completions.create(streamingPayload)
// Create the StreamingExecution object with all collected data
let _streamContent = ''
const streamingResult = {
stream: createReadableStreamFromAzureOpenAIStream(streamResponse, (content, usage) => {
// Update the execution data with the final content and token usage
_streamContent = content
streamingResult.execution.output.content = content
stream: createReadableStreamFromChatCompletionsStream(
streamResponse,
(content, usage) => {
_streamContent = content
streamingResult.execution.output.content = content
// Update token usage if available from the stream
if (usage) {
const newTokens = {
prompt: usage.prompt_tokens || tokens.prompt,
completion: usage.completion_tokens || tokens.completion,
total: usage.total_tokens || tokens.total,
if (usage) {
const newTokens = {
prompt: usage.prompt_tokens || tokens.prompt,
completion: usage.completion_tokens || tokens.completion,
total: usage.total_tokens || tokens.total,
}
streamingResult.execution.output.tokens = newTokens
}
streamingResult.execution.output.tokens = newTokens
}
}),
),
execution: {
success: true,
output: {
content: '', // Will be filled by the callback
content: '',
model: request.model,
tokens: {
prompt: tokens.prompt,
@@ -597,11 +1058,9 @@ export const azureOpenAIProvider: ProviderConfig = {
},
} as StreamingExecution
// Return the streaming execution object with explicit casting
return streamingResult as StreamingExecution
}
// Calculate overall timing
const providerEndTime = Date.now()
const providerEndTimeISO = new Date(providerEndTime).toISOString()
const totalDuration = providerEndTime - providerStartTime
@@ -622,10 +1081,8 @@ export const azureOpenAIProvider: ProviderConfig = {
iterations: iterationCount + 1,
timeSegments: timeSegments,
},
// We're not calculating cost here as it will be handled in logger.ts
}
} catch (error) {
// Include timing information even for errors
const providerEndTime = Date.now()
const providerEndTimeISO = new Date(providerEndTime).toISOString()
const totalDuration = providerEndTime - providerStartTime
@@ -635,7 +1092,6 @@ export const azureOpenAIProvider: ProviderConfig = {
duration: totalDuration,
})
// Create a new error with timing information
const enhancedError = new Error(error instanceof Error ? error.message : String(error))
// @ts-ignore - Adding timing property to the error
enhancedError.timing = {

View File

@@ -402,8 +402,7 @@ export class Serializer {
// Second pass: filter by mode and conditions
Object.entries(block.subBlocks).forEach(([id, subBlock]) => {
// Find the corresponding subblock config to check its mode and condition
const subBlockConfig = blockConfig.subBlocks.find((config) => config.id === id)
const matchingConfigs = blockConfig.subBlocks.filter((config) => config.id === id)
// Include field if it matches current mode OR if it's the starter inputFormat with values
const hasStarterInputFormatValues =
@@ -417,13 +416,17 @@ export class Serializer {
const isLegacyAgentField =
isAgentBlock && ['systemPrompt', 'userPrompt', 'memories'].includes(id)
// Check if field's condition is met (conditionally-hidden fields should be excluded)
const conditionMet = subBlockConfig
? evaluateCondition(subBlockConfig.condition, allValues)
: true
const anyConditionMet =
matchingConfigs.length === 0
? true
: matchingConfigs.some(
(config) =>
shouldIncludeField(config, isAdvancedMode) &&
evaluateCondition(config.condition, allValues)
)
if (
(subBlockConfig && shouldIncludeField(subBlockConfig, isAdvancedMode) && conditionMet) ||
(matchingConfigs.length > 0 && anyConditionMet) ||
hasStarterInputFormatValues ||
isLegacyAgentField
) {
@@ -540,26 +543,26 @@ export class Serializer {
// Iterate through the tool's parameters, not the block's subBlocks
Object.entries(currentTool.params || {}).forEach(([paramId, paramConfig]) => {
if (paramConfig.required && paramConfig.visibility === 'user-only') {
const subBlockConfig = blockConfig.subBlocks?.find((sb: any) => sb.id === paramId)
const matchingConfigs = blockConfig.subBlocks?.filter((sb: any) => sb.id === paramId) || []
let shouldValidateParam = true
if (subBlockConfig) {
if (matchingConfigs.length > 0) {
const isAdvancedMode = block.advancedMode ?? false
const includedByMode = shouldIncludeField(subBlockConfig, isAdvancedMode)
// Check visibility condition
const includedByCondition = evaluateCondition(subBlockConfig.condition, params)
shouldValidateParam = matchingConfigs.some((subBlockConfig: any) => {
const includedByMode = shouldIncludeField(subBlockConfig, isAdvancedMode)
// Check if field is required based on its required condition (if it's a condition object)
const isRequired = (() => {
if (!subBlockConfig.required) return false
if (typeof subBlockConfig.required === 'boolean') return subBlockConfig.required
// If required is a condition object, evaluate it
return evaluateCondition(subBlockConfig.required, params)
})()
const includedByCondition = evaluateCondition(subBlockConfig.condition, params)
shouldValidateParam = includedByMode && includedByCondition && isRequired
const isRequired = (() => {
if (!subBlockConfig.required) return false
if (typeof subBlockConfig.required === 'boolean') return subBlockConfig.required
return evaluateCondition(subBlockConfig.required, params)
})()
return includedByMode && includedByCondition && isRequired
})
}
if (!shouldValidateParam) {
@@ -568,7 +571,12 @@ export class Serializer {
const fieldValue = params[paramId]
if (fieldValue === undefined || fieldValue === null || fieldValue === '') {
const displayName = subBlockConfig?.title || paramId
const activeConfig = matchingConfigs.find(
(config: any) =>
shouldIncludeField(config, block.advancedMode ?? false) &&
evaluateCondition(config.condition, params)
)
const displayName = activeConfig?.title || paramId
missingFields.push(displayName)
}
}
@@ -596,8 +604,6 @@ export class Serializer {
const accessibleIds = new Set<string>(ancestorIds)
accessibleIds.add(blockId)
// Only add starter block if it's actually upstream (already in ancestorIds)
// Don't add it just because it exists on the canvas
if (starterBlock && ancestorIds.includes(starterBlock.id)) {
accessibleIds.add(starterBlock.id)
}

View File

@@ -22,10 +22,9 @@ describe('workflow store', () => {
})
describe('loop management', () => {
it('should regenerate loops when updateLoopCount is called', () => {
it.concurrent('should regenerate loops when updateLoopCount is called', () => {
const { addBlock, updateLoopCount } = useWorkflowStore.getState()
// Add a loop block
addBlock(
'loop1',
'loop',
@@ -38,23 +37,19 @@ describe('workflow store', () => {
}
)
// Update loop count
updateLoopCount('loop1', 10)
const state = useWorkflowStore.getState()
// Check that block data was updated
expect(state.blocks.loop1?.data?.count).toBe(10)
// Check that loops were regenerated
expect(state.loops.loop1).toBeDefined()
expect(state.loops.loop1.iterations).toBe(10)
})
it('should regenerate loops when updateLoopType is called', () => {
it.concurrent('should regenerate loops when updateLoopType is called', () => {
const { addBlock, updateLoopType } = useWorkflowStore.getState()
// Add a loop block
addBlock(
'loop1',
'loop',
@@ -67,24 +62,20 @@ describe('workflow store', () => {
}
)
// Update loop type
updateLoopType('loop1', 'forEach')
const state = useWorkflowStore.getState()
// Check that block data was updated
expect(state.blocks.loop1?.data?.loopType).toBe('forEach')
// Check that loops were regenerated with forEach items
expect(state.loops.loop1).toBeDefined()
expect(state.loops.loop1.loopType).toBe('forEach')
expect(state.loops.loop1.forEachItems).toEqual(['a', 'b', 'c'])
expect(state.loops.loop1.forEachItems).toBe('["a", "b", "c"]')
})
it('should regenerate loops when updateLoopCollection is called', () => {
it.concurrent('should regenerate loops when updateLoopCollection is called', () => {
const { addBlock, updateLoopCollection } = useWorkflowStore.getState()
// Add a forEach loop block
addBlock(
'loop1',
'loop',
@@ -96,23 +87,19 @@ describe('workflow store', () => {
}
)
// Update loop collection
updateLoopCollection('loop1', '["item1", "item2", "item3"]')
const state = useWorkflowStore.getState()
// Check that block data was updated
expect(state.blocks.loop1?.data?.collection).toBe('["item1", "item2", "item3"]')
// Check that loops were regenerated with new items
expect(state.loops.loop1).toBeDefined()
expect(state.loops.loop1.forEachItems).toEqual(['item1', 'item2', 'item3'])
expect(state.loops.loop1.forEachItems).toBe('["item1", "item2", "item3"]')
})
it('should clamp loop count between 1 and 100', () => {
it.concurrent('should clamp loop count between 1 and 1000', () => {
const { addBlock, updateLoopCount } = useWorkflowStore.getState()
// Add a loop block
addBlock(
'loop1',
'loop',
@@ -125,12 +112,10 @@ describe('workflow store', () => {
}
)
// Try to set count above max
updateLoopCount('loop1', 150)
updateLoopCount('loop1', 1500)
let state = useWorkflowStore.getState()
expect(state.blocks.loop1?.data?.count).toBe(100)
expect(state.blocks.loop1?.data?.count).toBe(1000)
// Try to set count below min
updateLoopCount('loop1', 0)
state = useWorkflowStore.getState()
expect(state.blocks.loop1?.data?.count).toBe(1)
@@ -138,10 +123,9 @@ describe('workflow store', () => {
})
describe('parallel management', () => {
it('should regenerate parallels when updateParallelCount is called', () => {
it.concurrent('should regenerate parallels when updateParallelCount is called', () => {
const { addBlock, updateParallelCount } = useWorkflowStore.getState()
// Add a parallel block
addBlock(
'parallel1',
'parallel',
@@ -153,23 +137,19 @@ describe('workflow store', () => {
}
)
// Update parallel count
updateParallelCount('parallel1', 5)
const state = useWorkflowStore.getState()
// Check that block data was updated
expect(state.blocks.parallel1?.data?.count).toBe(5)
// Check that parallels were regenerated
expect(state.parallels.parallel1).toBeDefined()
expect(state.parallels.parallel1.distribution).toBe('')
})
it('should regenerate parallels when updateParallelCollection is called', () => {
it.concurrent('should regenerate parallels when updateParallelCollection is called', () => {
const { addBlock, updateParallelCollection } = useWorkflowStore.getState()
// Add a parallel block
addBlock(
'parallel1',
'parallel',
@@ -182,27 +162,22 @@ describe('workflow store', () => {
}
)
// Update parallel collection
updateParallelCollection('parallel1', '["item1", "item2", "item3"]')
const state = useWorkflowStore.getState()
// Check that block data was updated
expect(state.blocks.parallel1?.data?.collection).toBe('["item1", "item2", "item3"]')
// Check that parallels were regenerated
expect(state.parallels.parallel1).toBeDefined()
expect(state.parallels.parallel1.distribution).toBe('["item1", "item2", "item3"]')
// Verify that the parallel count matches the collection size
const parsedDistribution = JSON.parse(state.parallels.parallel1.distribution as string)
expect(parsedDistribution).toHaveLength(3)
})
it('should clamp parallel count between 1 and 20', () => {
it.concurrent('should clamp parallel count between 1 and 20', () => {
const { addBlock, updateParallelCount } = useWorkflowStore.getState()
// Add a parallel block
addBlock(
'parallel1',
'parallel',
@@ -214,21 +189,18 @@ describe('workflow store', () => {
}
)
// Try to set count above max
updateParallelCount('parallel1', 100)
let state = useWorkflowStore.getState()
expect(state.blocks.parallel1?.data?.count).toBe(20)
// Try to set count below min
updateParallelCount('parallel1', 0)
state = useWorkflowStore.getState()
expect(state.blocks.parallel1?.data?.count).toBe(1)
})
it('should regenerate parallels when updateParallelType is called', () => {
it.concurrent('should regenerate parallels when updateParallelType is called', () => {
const { addBlock, updateParallelType } = useWorkflowStore.getState()
// Add a parallel block with default collection type
addBlock(
'parallel1',
'parallel',
@@ -241,50 +213,40 @@ describe('workflow store', () => {
}
)
// Update parallel type to count
updateParallelType('parallel1', 'count')
const state = useWorkflowStore.getState()
// Check that block data was updated
expect(state.blocks.parallel1?.data?.parallelType).toBe('count')
// Check that parallels were regenerated with new type
expect(state.parallels.parallel1).toBeDefined()
expect(state.parallels.parallel1.parallelType).toBe('count')
})
})
describe('mode switching', () => {
it('should toggle advanced mode on a block', () => {
it.concurrent('should toggle advanced mode on a block', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
// Add an agent block
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// Initially should be in basic mode (advancedMode: false)
let state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(false)
// Toggle to advanced mode
toggleBlockAdvancedMode('agent1')
state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(true)
// Toggle back to basic mode
toggleBlockAdvancedMode('agent1')
state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(false)
})
it('should preserve systemPrompt and userPrompt when switching modes', () => {
it.concurrent('should preserve systemPrompt and userPrompt when switching modes', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
const { setState: setSubBlockState } = useSubBlockStore
// Set up a mock active workflow
useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' })
// Add an agent block
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// Set initial values in basic mode
setSubBlockState({
workflowValues: {
'test-workflow': {
@@ -295,9 +257,7 @@ describe('workflow store', () => {
},
},
})
// Toggle to advanced mode
toggleBlockAdvancedMode('agent1')
// Check that prompts are preserved in advanced mode
let subBlockState = useSubBlockStore.getState()
expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe(
'You are a helpful assistant'
@@ -305,9 +265,7 @@ describe('workflow store', () => {
expect(subBlockState.workflowValues['test-workflow'].agent1.userPrompt).toBe(
'Hello, how are you?'
)
// Toggle back to basic mode
toggleBlockAdvancedMode('agent1')
// Check that prompts are still preserved
subBlockState = useSubBlockStore.getState()
expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe(
'You are a helpful assistant'
@@ -317,20 +275,16 @@ describe('workflow store', () => {
)
})
it('should preserve memories when switching from advanced to basic mode', () => {
it.concurrent('should preserve memories when switching from advanced to basic mode', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
const { setState: setSubBlockState } = useSubBlockStore
// Set up a mock active workflow
useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' })
// Add an agent block in advanced mode
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// First toggle to advanced mode
toggleBlockAdvancedMode('agent1')
// Set values including memories
setSubBlockState({
workflowValues: {
'test-workflow': {
@@ -346,10 +300,8 @@ describe('workflow store', () => {
},
})
// Toggle back to basic mode
toggleBlockAdvancedMode('agent1')
// Check that prompts and memories are all preserved
const subBlockState = useSubBlockStore.getState()
expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe(
'You are a helpful assistant'
@@ -363,52 +315,50 @@ describe('workflow store', () => {
])
})
it('should handle mode switching when no subblock values exist', () => {
it.concurrent('should handle mode switching when no subblock values exist', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
// Set up a mock active workflow
useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' })
// Add an agent block
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// Toggle modes without any subblock values set
expect(useWorkflowStore.getState().blocks.agent1?.advancedMode).toBe(false)
expect(() => toggleBlockAdvancedMode('agent1')).not.toThrow()
// Verify the mode changed
const state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(true)
})
it('should not throw when toggling non-existent block', () => {
it.concurrent('should not throw when toggling non-existent block', () => {
const { toggleBlockAdvancedMode } = useWorkflowStore.getState()
// Try to toggle a block that doesn't exist
expect(() => toggleBlockAdvancedMode('non-existent')).not.toThrow()
})
})
describe('addBlock with blockProperties', () => {
it('should create a block with default properties when no blockProperties provided', () => {
const { addBlock } = useWorkflowStore.getState()
it.concurrent(
'should create a block with default properties when no blockProperties provided',
() => {
const { addBlock } = useWorkflowStore.getState()
addBlock('agent1', 'agent', 'Test Agent', { x: 100, y: 200 })
addBlock('agent1', 'agent', 'Test Agent', { x: 100, y: 200 })
const state = useWorkflowStore.getState()
const block = state.blocks.agent1
const state = useWorkflowStore.getState()
const block = state.blocks.agent1
expect(block).toBeDefined()
expect(block.id).toBe('agent1')
expect(block.type).toBe('agent')
expect(block.name).toBe('Test Agent')
expect(block.position).toEqual({ x: 100, y: 200 })
expect(block.enabled).toBe(true)
expect(block.horizontalHandles).toBe(true)
expect(block.height).toBe(0)
})
expect(block).toBeDefined()
expect(block.id).toBe('agent1')
expect(block.type).toBe('agent')
expect(block.name).toBe('Test Agent')
expect(block.position).toEqual({ x: 100, y: 200 })
expect(block.enabled).toBe(true)
expect(block.horizontalHandles).toBe(true)
expect(block.height).toBe(0)
}
)
it('should create a block with custom blockProperties for regular blocks', () => {
it.concurrent('should create a block with custom blockProperties for regular blocks', () => {
const { addBlock } = useWorkflowStore.getState()
addBlock(
@@ -524,10 +474,8 @@ describe('workflow store', () => {
it('should handle blockProperties with parent relationships', () => {
const { addBlock } = useWorkflowStore.getState()
// First add a parent loop block
addBlock('loop1', 'loop', 'Parent Loop', { x: 0, y: 0 })
// Then add a child block with custom properties
addBlock(
'agent1',
'agent',
@@ -571,7 +519,7 @@ describe('workflow store', () => {
addBlock('block3', 'trigger', 'Start', { x: 200, y: 0 })
})
it('should have test blocks set up correctly', () => {
it.concurrent('should have test blocks set up correctly', () => {
const state = useWorkflowStore.getState()
expect(state.blocks.block1).toBeDefined()
@@ -582,7 +530,7 @@ describe('workflow store', () => {
expect(state.blocks.block3.name).toBe('Start')
})
it('should successfully rename a block when no conflicts exist', () => {
it.concurrent('should successfully rename a block when no conflicts exist', () => {
const { updateBlockName } = useWorkflowStore.getState()
const result = updateBlockName('block1', 'Data Processor')
@@ -593,18 +541,21 @@ describe('workflow store', () => {
expect(state.blocks.block1.name).toBe('Data Processor')
})
it('should allow renaming a block to a different case/spacing of its current name', () => {
const { updateBlockName } = useWorkflowStore.getState()
it.concurrent(
'should allow renaming a block to a different case/spacing of its current name',
() => {
const { updateBlockName } = useWorkflowStore.getState()
const result = updateBlockName('block1', 'column ad')
const result = updateBlockName('block1', 'column ad')
expect(result.success).toBe(true)
expect(result.success).toBe(true)
const state = useWorkflowStore.getState()
expect(state.blocks.block1.name).toBe('column ad')
})
const state = useWorkflowStore.getState()
expect(state.blocks.block1.name).toBe('column ad')
}
)
it('should prevent renaming when another block has the same normalized name', () => {
it.concurrent('should prevent renaming when another block has the same normalized name', () => {
const { updateBlockName } = useWorkflowStore.getState()
const result = updateBlockName('block2', 'Column AD')
@@ -615,29 +566,35 @@ describe('workflow store', () => {
expect(state.blocks.block2.name).toBe('Employee Length')
})
it('should prevent renaming when another block has a name that normalizes to the same value', () => {
const { updateBlockName } = useWorkflowStore.getState()
it.concurrent(
'should prevent renaming when another block has a name that normalizes to the same value',
() => {
const { updateBlockName } = useWorkflowStore.getState()
const result = updateBlockName('block2', 'columnad')
const result = updateBlockName('block2', 'columnad')
expect(result.success).toBe(false)
expect(result.success).toBe(false)
const state = useWorkflowStore.getState()
expect(state.blocks.block2.name).toBe('Employee Length')
})
const state = useWorkflowStore.getState()
expect(state.blocks.block2.name).toBe('Employee Length')
}
)
it('should prevent renaming when another block has a similar name with different spacing', () => {
const { updateBlockName } = useWorkflowStore.getState()
it.concurrent(
'should prevent renaming when another block has a similar name with different spacing',
() => {
const { updateBlockName } = useWorkflowStore.getState()
const result = updateBlockName('block3', 'employee length')
const result = updateBlockName('block3', 'employee length')
expect(result.success).toBe(false)
expect(result.success).toBe(false)
const state = useWorkflowStore.getState()
expect(state.blocks.block3.name).toBe('Start')
})
const state = useWorkflowStore.getState()
expect(state.blocks.block3.name).toBe('Start')
}
)
it('should handle edge cases with empty or whitespace-only names', () => {
it.concurrent('should handle edge cases with empty or whitespace-only names', () => {
const { updateBlockName } = useWorkflowStore.getState()
const result1 = updateBlockName('block1', '')
@@ -651,7 +608,7 @@ describe('workflow store', () => {
expect(state.blocks.block2.name).toBe(' ')
})
it('should return false when trying to rename a non-existent block', () => {
it.concurrent('should return false when trying to rename a non-existent block', () => {
const { updateBlockName } = useWorkflowStore.getState()
const result = updateBlockName('nonexistent', 'New Name')

View File

@@ -850,7 +850,7 @@ export const useWorkflowStore = create<WorkflowStore>()(
...block,
data: {
...block.data,
count: Math.max(1, Math.min(100, count)), // Clamp between 1-100
count: Math.max(1, Math.min(1000, count)), // Clamp between 1-1000
},
},
}

View File

@@ -3,7 +3,7 @@ import type { BlockState } from '@/stores/workflows/workflow/types'
import { convertLoopBlockToLoop } from '@/stores/workflows/workflow/utils'
describe('convertLoopBlockToLoop', () => {
it.concurrent('should parse JSON array string for forEach loops', () => {
it.concurrent('should keep JSON array string as-is for forEach loops', () => {
const blocks: Record<string, BlockState> = {
loop1: {
id: 'loop1',
@@ -25,11 +25,11 @@ describe('convertLoopBlockToLoop', () => {
expect(result).toBeDefined()
expect(result?.loopType).toBe('forEach')
expect(result?.forEachItems).toEqual(['item1', 'item2', 'item3'])
expect(result?.forEachItems).toBe('["item1", "item2", "item3"]')
expect(result?.iterations).toBe(10)
})
it.concurrent('should parse JSON object string for forEach loops', () => {
it.concurrent('should keep JSON object string as-is for forEach loops', () => {
const blocks: Record<string, BlockState> = {
loop1: {
id: 'loop1',
@@ -51,7 +51,7 @@ describe('convertLoopBlockToLoop', () => {
expect(result).toBeDefined()
expect(result?.loopType).toBe('forEach')
expect(result?.forEachItems).toEqual({ key1: 'value1', key2: 'value2' })
expect(result?.forEachItems).toBe('{"key1": "value1", "key2": "value2"}')
})
it.concurrent('should keep string as-is if not valid JSON', () => {
@@ -125,7 +125,6 @@ describe('convertLoopBlockToLoop', () => {
expect(result).toBeDefined()
expect(result?.loopType).toBe('for')
expect(result?.iterations).toBe(5)
// For 'for' loops, the collection is still parsed in case it's later changed to forEach
expect(result?.forEachItems).toEqual(['should', 'not', 'matter'])
expect(result?.forEachItems).toBe('["should", "not", "matter"]')
})
})

View File

@@ -25,28 +25,8 @@ export function convertLoopBlockToLoop(
loopType,
}
// Load ALL fields regardless of current loop type
// This allows switching between loop types without losing data
// For for/forEach loops, read from collection (block data) and map to forEachItems (loops store)
let forEachItems: any = loopBlock.data?.collection || ''
if (typeof forEachItems === 'string' && forEachItems.trim()) {
const trimmed = forEachItems.trim()
// Try to parse if it looks like JSON
if (trimmed.startsWith('[') || trimmed.startsWith('{')) {
try {
forEachItems = JSON.parse(trimmed)
} catch {
// Keep as string if parsing fails - will be evaluated at runtime
}
}
}
loop.forEachItems = forEachItems
// For while loops, use whileCondition
loop.forEachItems = loopBlock.data?.collection || ''
loop.whileCondition = loopBlock.data?.whileCondition || ''
// For do-while loops, use doWhileCondition
loop.doWhileCondition = loopBlock.data?.doWhileCondition || ''
return loop
@@ -66,16 +46,13 @@ export function convertParallelBlockToParallel(
const parallelBlock = blocks[parallelBlockId]
if (!parallelBlock || parallelBlock.type !== 'parallel') return undefined
// Get the parallel type from block data, defaulting to 'count' for consistency
const parallelType = parallelBlock.data?.parallelType || 'count'
// Validate parallelType against allowed values
const validParallelTypes = ['collection', 'count'] as const
const validatedParallelType = validParallelTypes.includes(parallelType as any)
? parallelType
: 'collection'
// Only set distribution if it's a collection-based parallel
const distribution =
validatedParallelType === 'collection' ? parallelBlock.data?.collection || '' : ''
@@ -139,7 +116,6 @@ export function findAllDescendantNodes(
export function generateLoopBlocks(blocks: Record<string, BlockState>): Record<string, Loop> {
const loops: Record<string, Loop> = {}
// Find all loop nodes
Object.entries(blocks)
.filter(([_, block]) => block.type === 'loop')
.forEach(([id, block]) => {
@@ -163,7 +139,6 @@ export function generateParallelBlocks(
): Record<string, Parallel> {
const parallels: Record<string, Parallel> = {}
// Find all parallel nodes
Object.entries(blocks)
.filter(([_, block]) => block.type === 'parallel')
.forEach(([id, block]) => {

View File

@@ -1,6 +1,5 @@
{
"lockfileVersion": 1,
"configVersion": 1,
"workspaces": {
"": {
"name": "simstudio",
@@ -26,6 +25,7 @@
"rss-parser": "3.13.0",
"socket.io-client": "4.8.1",
"twilio": "5.9.0",
"zod": "^3.24.2",
},
"devDependencies": {
"@biomejs/biome": "2.0.0-beta.5",
@@ -3936,8 +3936,6 @@
"oauth2-mock-server/jose": ["jose@5.10.0", "", {}, "sha512-s+3Al/p9g32Iq+oqXxkW//7jk2Vig6FF1CFqzVXoTUXt2qz89YWbL+OwS17NFYEvxC35n0FKeGO2LGYSxeM2Gg=="],
"ollama-ai-provider-v2/zod": ["zod@4.2.0", "", {}, "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw=="],
"openai/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],
"openai/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],

View File

@@ -4,17 +4,17 @@
# Global configuration
global:
imageRegistry: "ghcr.io"
storageClass: "gp3"
storageClass: "gp2" # Use gp2 (default on EKS) or create gp3 StorageClass for better performance
# Main application
app:
enabled: true
replicaCount: 2
# Node selector for application pods (customize based on your EKS node labels)
nodeSelector:
kubernetes.io/arch: amd64
node.kubernetes.io/instance-type: "t3.large"
# Node selector for application pods
# Uncomment and customize based on your EKS node labels:
# nodeSelector:
# node.kubernetes.io/instance-type: "t3.large"
resources:
limits:
@@ -28,8 +28,8 @@ app:
env:
NEXT_PUBLIC_APP_URL: "https://simstudio.acme.com"
BETTER_AUTH_URL: "https://simstudio.acme.com"
SOCKET_SERVER_URL: "https://simstudio-ws.acme.com"
NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com"
# SOCKET_SERVER_URL is auto-detected (uses internal service http://sim-realtime:3002)
NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" # Public WebSocket URL for browsers
# Security settings (REQUIRED - replace with your own secure secrets)
# Generate using: openssl rand -hex 32
@@ -52,11 +52,11 @@ app:
realtime:
enabled: true
replicaCount: 2
# Node selector for realtime pods (customize based on your EKS node labels)
nodeSelector:
kubernetes.io/arch: amd64
node.kubernetes.io/instance-type: "t3.medium"
# Node selector for realtime pods
# Uncomment and customize based on your EKS node labels:
# nodeSelector:
# node.kubernetes.io/instance-type: "t3.medium"
resources:
limits:
@@ -89,10 +89,11 @@ migrations:
# PostgreSQL database
postgresql:
enabled: true
# Node selector for database pods (recommended: memory-optimized EC2 instances)
nodeSelector:
node.kubernetes.io/instance-type: "r5.large"
# Node selector for database pods
# Uncomment and customize (recommended: memory-optimized EC2 instances like r5.large):
# nodeSelector:
# node.kubernetes.io/instance-type: "r5.large"
# Database authentication (REQUIRED - set secure credentials)
auth:
@@ -109,17 +110,17 @@ postgresql:
memory: "2Gi"
cpu: "1000m"
# Persistent storage using AWS EBS GP3 volumes
# Persistent storage using AWS EBS volumes
persistence:
enabled: true
storageClass: "gp3"
storageClass: "gp2" # Use gp2 (default) or create gp3 StorageClass
size: 50Gi
accessModes:
- ReadWriteOnce
# SSL/TLS configuration
# SSL/TLS configuration (requires cert-manager to be installed)
tls:
enabled: true
enabled: false # Set to true if cert-manager is installed
certificatesSecret: postgres-tls-secret
# PostgreSQL performance tuning for AWS infrastructure
@@ -130,14 +131,15 @@ postgresql:
minWalSize: "160MB"
# Ollama AI models with GPU acceleration (AWS EC2 GPU instances)
# Set ollama.enabled: false if you don't need local AI models
ollama:
enabled: true
enabled: false
replicaCount: 1
# GPU node targeting (recommended: g4dn.xlarge or p3.2xlarge instances)
nodeSelector:
node.kubernetes.io/instance-type: "g4dn.xlarge"
kubernetes.io/arch: amd64
# GPU node targeting - uncomment and customize for GPU instances
# Recommended: g4dn.xlarge or p3.2xlarge instances
# nodeSelector:
# node.kubernetes.io/instance-type: "g4dn.xlarge"
tolerations:
- key: "nvidia.com/gpu"
@@ -162,7 +164,7 @@ ollama:
# High-performance storage for AI models
persistence:
enabled: true
storageClass: "gp3"
storageClass: "gp2" # Use gp2 (default) or create gp3 StorageClass
size: 100Gi
accessModes:
- ReadWriteOnce

View File

@@ -4,16 +4,19 @@
# Global configuration
global:
imageRegistry: "ghcr.io"
storageClass: "managed-csi-premium"
# Use "managed-csi-premium" for Premium SSD (requires Premium storage-capable VMs like Standard_DS*)
# Use "managed-csi" for Standard SSD (works with all VM types)
storageClass: "managed-csi"
# Main application
app:
enabled: true
replicaCount: 1
# Node selector for application pods (customize based on your AKS node labels)
nodeSelector:
node-role: application
replicaCount: 2
# Node selector for application pods
# Uncomment and customize based on your AKS node labels:
# nodeSelector:
# agentpool: "application"
resources:
limits:
@@ -26,8 +29,8 @@ app:
env:
NEXT_PUBLIC_APP_URL: "https://simstudio.acme.com"
BETTER_AUTH_URL: "https://simstudio.acme.com"
SOCKET_SERVER_URL: "https://simstudio-ws.acme.com"
NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com"
# SOCKET_SERVER_URL is auto-detected (uses internal service http://sim-realtime:3002)
NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" # Public WebSocket URL for browsers
# Security settings (REQUIRED - replace with your own secure secrets)
# Generate using: openssl rand -hex 32
@@ -46,11 +49,12 @@ app:
# Realtime service
realtime:
enabled: true
replicaCount: 1
# Node selector for application pods (customize based on your AKS node labels)
nodeSelector:
node-role: application
replicaCount: 2
# Node selector for realtime pods
# Uncomment and customize based on your AKS node labels:
# nodeSelector:
# agentpool: "application"
resources:
limits:
@@ -74,10 +78,11 @@ migrations:
# PostgreSQL database
postgresql:
enabled: true
# Node selector for database pods (recommended: memory-optimized VM sizes)
nodeSelector:
node-role: datalake
# Node selector for database pods
# Uncomment and customize (recommended: memory-optimized VM sizes):
# nodeSelector:
# agentpool: "database"
# Database authentication (REQUIRED - set secure credentials)
auth:
@@ -93,15 +98,15 @@ postgresql:
memory: "1Gi"
cpu: "500m"
# Persistent storage using Azure Premium SSD
# Persistent storage using Azure Managed Disk
persistence:
enabled: true
storageClass: "managed-csi-premium"
storageClass: "managed-csi"
size: 10Gi
# SSL/TLS configuration (recommended for production)
# SSL/TLS configuration (requires cert-manager to be installed)
tls:
enabled: true
enabled: false # Set to true if cert-manager is installed
certificatesSecret: postgres-tls-secret
# PostgreSQL performance tuning for Azure infrastructure
@@ -112,13 +117,15 @@ postgresql:
minWalSize: "80MB"
# Ollama AI models with GPU acceleration (Azure NC-series VMs)
# Set ollama.enabled: false if you don't need local AI models
ollama:
enabled: true
enabled: false
replicaCount: 1
# GPU node targeting (recommended: NC6s_v3 or NC12s_v3 VMs)
nodeSelector:
accelerator: nvidia
# GPU node targeting - uncomment and customize for GPU node pools
# Recommended: NC6s_v3 or NC12s_v3 VMs
# nodeSelector:
# agentpool: "gpu"
tolerations:
- key: "sku"
@@ -139,7 +146,7 @@ ollama:
memory: "4Gi"
cpu: "1000m"
# High-performance storage for AI models
# High-performance storage for AI models (use managed-csi-premium for GPU workloads)
persistence:
enabled: true
storageClass: "managed-csi-premium"

View File

@@ -10,11 +10,11 @@ global:
app:
enabled: true
replicaCount: 2
# Node selector for application pods (customize based on your GKE node labels)
nodeSelector:
kubernetes.io/arch: amd64
cloud.google.com/gke-nodepool: "default-pool"
# Node selector for application pods
# Uncomment and customize based on your GKE node labels:
# nodeSelector:
# cloud.google.com/gke-nodepool: "default-pool"
resources:
limits:
@@ -28,8 +28,8 @@ app:
env:
NEXT_PUBLIC_APP_URL: "https://simstudio.acme.com"
BETTER_AUTH_URL: "https://simstudio.acme.com"
SOCKET_SERVER_URL: "https://simstudio-ws.acme.com"
NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com"
# SOCKET_SERVER_URL is auto-detected (uses internal service http://sim-realtime:3002)
NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" # Public WebSocket URL for browsers
# Security settings (REQUIRED - replace with your own secure secrets)
# Generate using: openssl rand -hex 32
@@ -53,11 +53,11 @@ app:
realtime:
enabled: true
replicaCount: 2
# Node selector for realtime pods (customize based on your GKE node labels)
nodeSelector:
kubernetes.io/arch: amd64
cloud.google.com/gke-nodepool: "default-pool"
# Node selector for realtime pods
# Uncomment and customize based on your GKE node labels:
# nodeSelector:
# cloud.google.com/gke-nodepool: "default-pool"
resources:
limits:
@@ -90,11 +90,11 @@ migrations:
# PostgreSQL database
postgresql:
enabled: true
# Node selector for database pods (recommended: memory-optimized machine types)
nodeSelector:
cloud.google.com/gke-nodepool: "database-pool"
cloud.google.com/machine-family: "n2"
# Node selector for database pods
# Uncomment and customize (recommended: memory-optimized machine types):
# nodeSelector:
# cloud.google.com/gke-nodepool: "database-pool"
# Database authentication (REQUIRED - set secure credentials)
auth:
@@ -119,9 +119,9 @@ postgresql:
accessModes:
- ReadWriteOnce
# SSL/TLS configuration
# SSL/TLS configuration (requires cert-manager to be installed)
tls:
enabled: true
enabled: false # Set to true if cert-manager is installed
certificatesSecret: postgres-tls-secret
# PostgreSQL performance tuning for GCP infrastructure
@@ -132,14 +132,16 @@ postgresql:
minWalSize: "160MB"
# Ollama AI models with GPU acceleration (GCP GPU instances)
# Set ollama.enabled: false if you don't need local AI models
ollama:
enabled: true
enabled: false
replicaCount: 1
# GPU node targeting (recommended: T4 or V100 GPU instances)
nodeSelector:
cloud.google.com/gke-nodepool: "gpu-pool"
cloud.google.com/gke-accelerator: "nvidia-tesla-t4"
# GPU node targeting - uncomment and customize for GPU node pools
# Recommended: T4 or V100 GPU instances
# nodeSelector:
# cloud.google.com/gke-nodepool: "gpu-pool"
# cloud.google.com/gke-accelerator: "nvidia-tesla-t4"
tolerations:
- key: "nvidia.com/gpu"

View File

@@ -204,9 +204,15 @@ Validate required secrets and reject default placeholder values
{{- if and .Values.postgresql.enabled (eq .Values.postgresql.auth.password "CHANGE-ME-SECURE-PASSWORD") }}
{{- fail "postgresql.auth.password must not use the default placeholder value. Set a secure password for production" }}
{{- end }}
{{- if and .Values.postgresql.enabled (not (regexMatch "^[a-zA-Z0-9._-]+$" .Values.postgresql.auth.password)) }}
{{- fail "postgresql.auth.password must only contain alphanumeric characters, hyphens, underscores, or periods to ensure DATABASE_URL compatibility. Generate with: openssl rand -base64 16 | tr -d '/+='" }}
{{- end }}
{{- if and .Values.externalDatabase.enabled (not .Values.externalDatabase.password) }}
{{- fail "externalDatabase.password is required when using external database" }}
{{- end }}
{{- if and .Values.externalDatabase.enabled .Values.externalDatabase.password (not (regexMatch "^[a-zA-Z0-9._-]+$" .Values.externalDatabase.password)) }}
{{- fail "externalDatabase.password must only contain alphanumeric characters, hyphens, underscores, or periods to ensure DATABASE_URL compatibility." }}
{{- end }}
{{- end }}
{{/*

View File

@@ -68,7 +68,7 @@ spec:
- name: DATABASE_URL
value: {{ include "sim.databaseUrl" . | quote }}
- name: SOCKET_SERVER_URL
value: {{ .Values.app.env.SOCKET_SERVER_URL | default "http://localhost:3002" | quote }}
value: {{ include "sim.socketServerUrl" . | quote }}
- name: OLLAMA_URL
value: {{ include "sim.ollamaUrl" . | quote }}
{{- range $key, $value := omit .Values.app.env "DATABASE_URL" "SOCKET_SERVER_URL" "OLLAMA_URL" }}

View File

@@ -185,8 +185,7 @@
},
"OLLAMA_URL": {
"type": "string",
"format": "uri",
"description": "Ollama local LLM server URL"
"description": "Ollama local LLM server URL (leave empty if not using Ollama)"
},
"ELEVENLABS_API_KEY": {
"type": "string",
@@ -238,18 +237,15 @@
},
"NEXT_PUBLIC_BRAND_LOGO_URL": {
"type": "string",
"format": "uri",
"description": "Custom logo URL (must be a full URL, e.g., https://example.com/logo.png)"
"description": "Custom logo URL (leave empty for default)"
},
"NEXT_PUBLIC_BRAND_FAVICON_URL": {
"type": "string",
"format": "uri",
"description": "Custom favicon URL (must be a full URL, e.g., https://example.com/favicon.ico)"
"description": "Custom favicon URL (leave empty for default)"
},
"NEXT_PUBLIC_CUSTOM_CSS_URL": {
"type": "string",
"format": "uri",
"description": "Custom stylesheet URL (must be a full URL)"
"description": "Custom stylesheet URL (leave empty for none)"
},
"NEXT_PUBLIC_SUPPORT_EMAIL": {
"type": "string",

View File

@@ -52,8 +52,9 @@ app:
# Application URLs
NEXT_PUBLIC_APP_URL: "http://localhost:3000"
BETTER_AUTH_URL: "http://localhost:3000"
SOCKET_SERVER_URL: "http://localhost:3002"
NEXT_PUBLIC_SOCKET_URL: "http://localhost:3002"
# SOCKET_SERVER_URL: Auto-detected when realtime.enabled=true (uses internal service)
# Only set this if using an external WebSocket service with realtime.enabled=false
NEXT_PUBLIC_SOCKET_URL: "http://localhost:3002" # Public WebSocket URL for browsers
# Node environment
NODE_ENV: "production"
@@ -99,15 +100,8 @@ app:
# Rate Limiting Configuration (per minute)
RATE_LIMIT_WINDOW_MS: "60000" # Rate limit window duration (1 minute)
RATE_LIMIT_FREE_SYNC: "10" # Free tier sync API executions
RATE_LIMIT_PRO_SYNC: "25" # Pro tier sync API executions
RATE_LIMIT_TEAM_SYNC: "75" # Team tier sync API executions
RATE_LIMIT_ENTERPRISE_SYNC: "150" # Enterprise tier sync API executions
RATE_LIMIT_FREE_ASYNC: "50" # Free tier async API executions
RATE_LIMIT_PRO_ASYNC: "200" # Pro tier async API executions
RATE_LIMIT_TEAM_ASYNC: "500" # Team tier async API executions
RATE_LIMIT_ENTERPRISE_ASYNC: "1000" # Enterprise tier async API executions
MANUAL_EXECUTION_LIMIT: "999999" # Manual execution bypass value
RATE_LIMIT_FREE_SYNC: "10" # Sync API executions per minute
RATE_LIMIT_FREE_ASYNC: "50" # Async API executions per minute
# UI Branding & Whitelabeling Configuration
NEXT_PUBLIC_BRAND_NAME: "Sim" # Custom brand name

View File

@@ -38,6 +38,7 @@
"next-runtime-env": "3.3.0",
"@modelcontextprotocol/sdk": "1.20.2",
"@t3-oss/env-nextjs": "0.13.4",
"zod": "^3.24.2",
"@tanstack/react-query": "5.90.8",
"@tanstack/react-query-devtools": "5.90.2",
"@types/fluent-ffmpeg": "2.1.28",