fix(router): fixed routing issue with workflow block, added tests (#739)

* fixed routing issue with workflow block, added tests

* add new demo

* change console to logger

* new demo

* changed all console to logger
This commit is contained in:
Waleed Latif
2025-07-21 12:12:53 -07:00
committed by GitHub
parent 7739917941
commit 7f0f902204
23 changed files with 732 additions and 98 deletions

View File

@@ -1,6 +1,9 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluencePages')
export const dynamic = 'force-dynamic'
export async function POST(request: Request) {
@@ -39,7 +42,7 @@ export async function POST(request: Request) {
const queryString = queryParams.toString()
const url = queryString ? `${baseUrl}?${queryString}` : baseUrl
console.log(`Fetching Confluence pages from: ${url}`)
logger.info(`Fetching Confluence pages from: ${url}`)
// Make the request to Confluence API with OAuth Bearer token
const response = await fetch(url, {
@@ -50,23 +53,23 @@ export async function POST(request: Request) {
},
})
console.log('Response status:', response.status, response.statusText)
logger.info('Response status:', response.status, response.statusText)
if (!response.ok) {
console.error(`Confluence API error: ${response.status} ${response.statusText}`)
logger.error(`Confluence API error: ${response.status} ${response.statusText}`)
let errorMessage
try {
const errorData = await response.json()
console.error('Error details:', JSON.stringify(errorData, null, 2))
logger.error('Error details:', JSON.stringify(errorData, null, 2))
errorMessage = errorData.message || `Failed to fetch Confluence pages (${response.status})`
} catch (e) {
console.error('Could not parse error response as JSON:', e)
logger.error('Could not parse error response as JSON:', e)
// Try to get the response text for more context
try {
const text = await response.text()
console.error('Response text:', text)
logger.error('Response text:', text)
errorMessage = `Failed to fetch Confluence pages: ${response.status} ${response.statusText}`
} catch (_textError) {
errorMessage = `Failed to fetch Confluence pages: ${response.status} ${response.statusText}`
@@ -77,13 +80,13 @@ export async function POST(request: Request) {
}
const data = await response.json()
console.log('Confluence API response:', `${JSON.stringify(data, null, 2).substring(0, 300)}...`)
console.log(`Found ${data.results?.length || 0} pages`)
logger.info('Confluence API response:', `${JSON.stringify(data, null, 2).substring(0, 300)}...`)
logger.info(`Found ${data.results?.length || 0} pages`)
if (data.results && data.results.length > 0) {
console.log('First few pages:')
logger.info('First few pages:')
for (const page of data.results.slice(0, 3)) {
console.log(`- ${page.id}: ${page.title}`)
logger.info(`- ${page.id}: ${page.title}`)
}
}
@@ -99,7 +102,7 @@ export async function POST(request: Request) {
})),
})
} catch (error) {
console.error('Error fetching Confluence pages:', error)
logger.error('Error fetching Confluence pages:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }

View File

@@ -2,9 +2,12 @@ import crypto from 'crypto'
import { and, desc, eq, isNull } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { permissions, workflow, workflowBlocks, workspace } from '@/db/schema'
const logger = createLogger('Workspaces')
// Get all workspaces for the current user
export async function GET() {
const session = await getSession()
@@ -244,12 +247,12 @@ async function createWorkspace(userId: string, name: string) {
updatedAt: now,
})
console.log(
`Created workspace ${workspaceId} with initial workflow ${workflowId} for user ${userId}`
logger.info(
`Created workspace ${workspaceId} with initial workflow ${workflowId} for user ${userId}`
)
})
} catch (error) {
console.error(`Failed to create workspace ${workspaceId} with initial workflow:`, error)
logger.error(`Failed to create workspace ${workspaceId} with initial workflow:`, error)
throw error
}
@@ -276,7 +279,7 @@ async function migrateExistingWorkflows(userId: string, workspaceId: string) {
return // No orphaned workflows to migrate
}
console.log(
logger.info(
`Migrating ${orphanedWorkflows.length} workflows to workspace ${workspaceId} for user ${userId}`
)
@@ -308,6 +311,6 @@ async function ensureWorkflowsHaveWorkspace(userId: string, defaultWorkspaceId:
})
.where(and(eq(workflow.userId, userId), isNull(workflow.workspaceId)))
console.log(`Fixed ${orphanedWorkflows.length} orphaned workflows for user ${userId}`)
logger.info(`Fixed ${orphanedWorkflows.length} orphaned workflows for user ${userId}`)
}
}

View File

@@ -1,11 +1,14 @@
import { and, eq } from 'drizzle-orm'
import { notFound } from 'next/navigation'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { templateStars, templates } from '@/db/schema'
import type { Template } from '../templates'
import TemplateDetails from './template'
const logger = createLogger('TemplatePage')
interface TemplatePageProps {
params: Promise<{
workspaceId: string
@@ -58,7 +61,7 @@ export default async function TemplatePage({ params }: TemplatePageProps) {
// Validate that required fields are present
if (!template.id || !template.name || !template.author) {
console.error('Template missing required fields:', {
logger.error('Template missing required fields:', {
id: template.id,
name: template.name,
author: template.author,
@@ -100,9 +103,9 @@ export default async function TemplatePage({ params }: TemplatePageProps) {
isStarred,
}
console.log('Template from DB:', template)
console.log('Serialized template:', serializedTemplate)
console.log('Template state from DB:', template.state)
logger.info('Template from DB:', template)
logger.info('Serialized template:', serializedTemplate)
logger.info('Template state from DB:', template.state)
return (
<TemplateDetails

View File

@@ -143,7 +143,7 @@ export default function TemplateDetails({
const renderWorkflowPreview = () => {
// Follow the same pattern as deployed-workflow-card.tsx
if (!template?.state) {
console.log('Template has no state:', template)
logger.info('Template has no state:', template)
return (
<div className='flex h-full items-center justify-center text-center'>
<div className='text-muted-foreground'>
@@ -154,10 +154,10 @@ export default function TemplateDetails({
)
}
console.log('Template state:', template.state)
console.log('Template state type:', typeof template.state)
console.log('Template state blocks:', template.state.blocks)
console.log('Template state edges:', template.state.edges)
logger.info('Template state:', template.state)
logger.info('Template state type:', typeof template.state)
logger.info('Template state blocks:', template.state.blocks)
logger.info('Template state edges:', template.state.edges)
try {
return (

View File

@@ -92,7 +92,7 @@ export default function Templates({ initialTemplates, currentUserId }: Templates
const handleCreateNew = () => {
// TODO: Open create template modal or navigate to create page
console.log('Create new template')
logger.info('Create new template')
}
// Handle star change callback from template card

View File

@@ -17,11 +17,14 @@ import {
import { Input } from '@/components/ui/input'
import { ScrollArea } from '@/components/ui/scroll-area'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console-logger'
import { validateName } from '@/lib/utils'
import { useVariablesStore } from '@/stores/panel/variables/store'
import type { Variable, VariableType } from '@/stores/panel/variables/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('Variables')
export function Variables() {
const { activeWorkflowId, workflows } = useWorkflowRegistry()
const {
@@ -190,7 +193,7 @@ export function Variables() {
return undefined // Valid object
} catch (e) {
console.log('Object parsing error:', e)
logger.info('Object parsing error:', e)
return 'Invalid object syntax'
}
case 'array':
@@ -215,7 +218,7 @@ export function Variables() {
return undefined // Valid array
} catch (e) {
console.log('Array parsing error:', e)
logger.info('Array parsing error:', e)
return 'Invalid array syntax'
}
default:

View File

@@ -13,6 +13,7 @@ import {
CommandList,
} from '@/components/ui/command'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
import { createLogger } from '@/lib/logs/console-logger'
import {
type Credential,
getProviderIdFromServiceId,
@@ -21,6 +22,8 @@ import {
} from '@/lib/oauth'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = createLogger('ConfluenceFileSelector')
export interface ConfluenceFileInfo {
id: string
name: string
@@ -138,7 +141,7 @@ export function ConfluenceFileSelector({
}
}
} catch (error) {
console.error('Error fetching credentials:', error)
logger.error('Error fetching credentials:', error)
} finally {
setIsLoading(false)
}
@@ -205,7 +208,7 @@ export function ConfluenceFileSelector({
onFileInfoChange?.(data.file)
}
} catch (error) {
console.error('Error fetching page info:', error)
logger.error('Error fetching page info:', error)
setError((error as Error).message)
} finally {
setIsLoading(false)
@@ -247,7 +250,7 @@ export function ConfluenceFileSelector({
if (!tokenResponse.ok) {
const errorData = await tokenResponse.json()
console.error('Access token error:', errorData)
logger.error('Access token error:', errorData)
// If there's a token error, we might need to reconnect the account
setError('Authentication failed. Please reconnect your Confluence account.')
@@ -259,7 +262,7 @@ export function ConfluenceFileSelector({
const accessToken = tokenData.accessToken
if (!accessToken) {
console.error('No access token returned')
logger.error('No access token returned')
setError('Authentication failed. Please reconnect your Confluence account.')
setIsLoading(false)
return
@@ -281,12 +284,12 @@ export function ConfluenceFileSelector({
if (!response.ok) {
const errorData = await response.json()
console.error('Confluence API error:', errorData)
logger.error('Confluence API error:', errorData)
throw new Error(errorData.error || 'Failed to fetch pages')
}
const data = await response.json()
console.log(`Received ${data.files?.length || 0} files from API`)
logger.info(`Received ${data.files?.length || 0} files from API`)
setFiles(data.files || [])
// If we have a selected file ID, find the file info
@@ -301,7 +304,7 @@ export function ConfluenceFileSelector({
}
}
} catch (error) {
console.error('Error fetching pages:', error)
logger.error('Error fetching pages:', error)
setError((error as Error).message)
setFiles([])
} finally {

View File

@@ -1,4 +1,5 @@
import { useCallback, useEffect, useState } from 'react'
import { logger } from '@trigger.dev/sdk/v3'
import { PlusIcon, WrenchIcon, XIcon } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
@@ -684,34 +685,25 @@ export function ToolInput({
}
const handleOperationChange = (toolIndex: number, operation: string) => {
console.log('🔄 handleOperationChange called:', { toolIndex, operation, isPreview, disabled })
if (isPreview || disabled) {
console.log('❌ Early return: preview or disabled')
logger.info('❌ Early return: preview or disabled')
return
}
const tool = selectedTools[toolIndex]
console.log('🔧 Current tool:', tool)
const newToolId = getToolIdForOperation(tool.type, operation)
console.log('🆔 getToolIdForOperation result:', { toolType: tool.type, operation, newToolId })
if (!newToolId) {
console.log('❌ Early return: no newToolId')
logger.info('❌ Early return: no newToolId')
return
}
// Get parameters for the new tool
const toolParams = getToolParametersConfig(newToolId, tool.type)
console.log('📋 getToolParametersConfig result:', {
newToolId,
toolType: tool.type,
toolParams,
})
if (!toolParams) {
console.log('❌ Early return: no toolParams')
logger.info('❌ Early return: no toolParams')
return
}

View File

@@ -605,9 +605,9 @@ export function useWorkflowExecution() {
}
try {
console.log('Executing debug step with blocks:', pendingBlocks)
logger.info('Executing debug step with blocks:', pendingBlocks)
const result = await executor!.continueExecution(pendingBlocks, debugContext!)
console.log('Debug step execution result:', result)
logger.info('Debug step execution result:', result)
if (isDebugSessionComplete(result)) {
await handleDebugSessionComplete(result)
@@ -660,7 +660,7 @@ export function useWorkflowExecution() {
let currentContext = { ...debugContext! }
let currentPendingBlocks = [...pendingBlocks]
console.log('Starting resume execution with blocks:', currentPendingBlocks)
logger.info('Starting resume execution with blocks:', currentPendingBlocks)
// Continue execution until there are no more pending blocks
let iterationCount = 0

View File

@@ -402,7 +402,6 @@ const WorkflowContent = React.memo(() => {
}
const { type } = event.detail
console.log('🛠️ Adding block from toolbar:', type)
if (!type) return
if (type === 'connectionBlock') return

View File

@@ -4,12 +4,15 @@ import { useCallback, useEffect, useMemo, useState } from 'react'
import clsx from 'clsx'
import { useParams, usePathname } from 'next/navigation'
import { Skeleton } from '@/components/ui/skeleton'
import { createLogger } from '@/lib/logs/console-logger'
import { type FolderTreeNode, useFolderStore } from '@/stores/folders/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowMetadata } from '@/stores/workflows/registry/types'
import { FolderItem } from './components/folder-item'
import { WorkflowItem } from './components/workflow-item'
const logger = createLogger('FolderTree')
interface FolderSectionProps {
folder: FolderTreeNode
level: number
@@ -282,9 +285,9 @@ function useDragHandlers(
for (const workflowId of workflowIds) {
await updateWorkflow(workflowId, { folderId: targetFolderId })
}
console.log(logMessage || `Moved ${workflowIds.length} workflow(s)`)
logger.info(logMessage || `Moved ${workflowIds.length} workflow(s)`)
} catch (error) {
console.error('Failed to move workflows:', error)
logger.error('Failed to move workflows:', error)
}
}
@@ -298,7 +301,7 @@ function useDragHandlers(
// Prevent circular references - don't allow dropping a folder into itself or its descendants
if (targetFolderId === folderIdData) {
console.log('Cannot move folder into itself')
logger.info('Cannot move folder into itself')
return
}
@@ -308,21 +311,21 @@ function useDragHandlers(
targetFolderId &&
draggedFolderPath.some((ancestor) => ancestor.id === targetFolderId)
) {
console.log('Cannot move folder into its own descendant')
logger.info('Cannot move folder into its own descendant')
return
}
// If target folder is already at level 1 (has 1 parent), we can't nest another folder
if (targetFolderPath.length >= 1) {
console.log('Cannot nest folder: Maximum 2 levels of nesting allowed. Drop prevented.')
logger.info('Cannot nest folder: Maximum 2 levels of nesting allowed. Drop prevented.')
return // Prevent the drop entirely
}
// Target folder is at root level, safe to nest
await updateFolder(folderIdData, { parentId: targetFolderId })
console.log(`Moved folder to ${targetFolderId ? `folder ${targetFolderId}` : 'root'}`)
logger.info(`Moved folder to ${targetFolderId ? `folder ${targetFolderId}` : 'root'}`)
} catch (error) {
console.error('Failed to move folder:', error)
logger.error('Failed to move folder:', error)
}
}
}
@@ -416,9 +419,9 @@ export function FolderTree({
for (const folder of deepFolders) {
try {
await updateFolderAPI(folder.id, { parentId: null })
console.log(`Moved deeply nested folder "${folder.name}" to root level`)
logger.info(`Moved deeply nested folder "${folder.name}" to root level`)
} catch (error) {
console.error(`Failed to move folder "${folder.name}":`, error)
logger.error(`Failed to move folder "${folder.name}":`, error)
}
}
}, [workspaceId])

View File

@@ -805,7 +805,7 @@ describe('Executor', () => {
executedBlocks,
mockContext
)
expect(nonSelectedResult).toBe(true) // router executed + target NOT selected = dependency auto-met
expect(nonSelectedResult).toBe(false) // router executed + target NOT selected = dependency NOT met
})
test('should handle condition decisions correctly in dependency checking', () => {
@@ -837,7 +837,7 @@ describe('Executor', () => {
{ source: 'condition1', target: 'falseTarget', sourceHandle: 'condition-false' },
]
const falseResult = checkDependencies(falseConnections, executedBlocks, mockContext)
expect(falseResult).toBe(true) // condition executed + path NOT selected = dependency auto-met
expect(falseResult).toBe(false) // condition executed + path NOT selected = dependency NOT met
})
test('should handle regular sequential dependencies correctly', () => {

View File

@@ -1123,9 +1123,9 @@ export class Executor {
const conditionId = conn.sourceHandle.replace('condition-', '')
const selectedCondition = context.decisions.condition.get(conn.source)
// If source is executed and this is not the selected path, consider it met
// If source is executed and this is not the selected path, dependency is NOT met
if (sourceExecuted && selectedCondition && conditionId !== selectedCondition) {
return true
return false
}
// Otherwise, this dependency is met only if source is executed and this is the selected path
@@ -1137,9 +1137,9 @@ export class Executor {
if (sourceBlock?.metadata?.id === BlockType.ROUTER) {
const selectedTarget = context.decisions.router.get(conn.source)
// If source is executed and this is not the selected target, consider it met
// If source is executed and this is not the selected target, dependency is NOT met
if (sourceExecuted && selectedTarget && conn.target !== selectedTarget) {
return true
return false
}
// Otherwise, this dependency is met only if source is executed and this is the selected target

View File

@@ -7,6 +7,7 @@ describe('Routing', () => {
it.concurrent('should categorize flow control blocks correctly', () => {
expect(Routing.getCategory(BlockType.PARALLEL)).toBe(BlockCategory.FLOW_CONTROL)
expect(Routing.getCategory(BlockType.LOOP)).toBe(BlockCategory.FLOW_CONTROL)
expect(Routing.getCategory(BlockType.WORKFLOW)).toBe(BlockCategory.FLOW_CONTROL)
})
it.concurrent('should categorize routing blocks correctly', () => {
@@ -19,6 +20,8 @@ describe('Routing', () => {
expect(Routing.getCategory(BlockType.AGENT)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.API)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.STARTER)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.RESPONSE)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.EVALUATOR)).toBe(BlockCategory.REGULAR_BLOCK)
})
it.concurrent('should default to regular block for unknown types', () => {
@@ -36,6 +39,7 @@ describe('Routing', () => {
it.concurrent('should return false for flow control blocks', () => {
expect(Routing.shouldActivateDownstream(BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldActivateDownstream(BlockType.LOOP)).toBe(false)
expect(Routing.shouldActivateDownstream(BlockType.WORKFLOW)).toBe(false)
})
it.concurrent('should return true for regular blocks', () => {
@@ -53,6 +57,7 @@ describe('Routing', () => {
it.concurrent('should return true for flow control blocks', () => {
expect(Routing.requiresActivePathCheck(BlockType.PARALLEL)).toBe(true)
expect(Routing.requiresActivePathCheck(BlockType.LOOP)).toBe(true)
expect(Routing.requiresActivePathCheck(BlockType.WORKFLOW)).toBe(true)
})
it.concurrent('should return false for routing blocks', () => {
@@ -75,6 +80,7 @@ describe('Routing', () => {
it.concurrent('should return true for flow control blocks', () => {
expect(Routing.shouldSkipInSelectiveActivation(BlockType.PARALLEL)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.LOOP)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.WORKFLOW)).toBe(true)
})
it.concurrent('should return false for routing blocks', () => {

View File

@@ -7,31 +7,70 @@ export enum BlockCategory {
}
export interface RoutingBehavior {
shouldActivateDownstream: boolean
requiresActivePathCheck: boolean
skipInSelectiveActivation: boolean
shouldActivateDownstream: boolean // Whether this block should activate downstream blocks when it completes
requiresActivePathCheck: boolean // Whether this block's handler needs routing-aware logic (NOT universal path checking)
skipInSelectiveActivation: boolean // Whether to skip this block type during connection filtering in selective activation
}
/**
* Centralized routing strategy that defines how different block types
* should behave in the execution path system.
*
* IMPORTANT: This system works in conjunction with the executor's universal
* active path checking (executor/index.ts lines 992-994). The flags here
* control specialized behavior, not basic path enforcement.
*
* ## Execution Flow Architecture:
*
* 1. **Universal Path Check** (Executor Level):
* - ALL blocks are subject to `context.activeExecutionPath.has(block.id)`
* - This prevents unselected blocks from executing (fixes router bypass bug)
*
* 2. **Specialized Routing Behavior** (Handler Level):
* - Some block handlers need additional routing logic
* - Controlled by `requiresActivePathCheck` flag
*
* ## Block Categories Explained:
*
* ### ROUTING_BLOCK (Router, Condition)
* - **Role**: Decision makers that CREATE active execution paths
* - **Path Check**: NO - they must execute to make routing decisions
* - **Downstream**: YES - they activate their selected targets
* - **Selective**: NO - they participate in making routing decisions
*
* ### FLOW_CONTROL (Parallel, Loop, Workflow)
* - **Role**: Complex blocks that CONSUME routing decisions
* - **Path Check**: YES - their handlers need routing awareness for internal logic
* - **Downstream**: NO - they manage their own internal activation patterns
* - **Selective**: YES - skip them during connection filtering to prevent premature activation
*
* ### REGULAR_BLOCK (Function, Agent, API, etc.)
* - **Role**: Standard execution blocks with simple activation patterns
* - **Path Check**: NO - they rely on dependency logic and universal path checking
* - **Downstream**: YES - they activate all downstream blocks normally
* - **Selective**: NO - they participate in normal activation patterns
*
* ## Multi-Input Support:
* The dependency checking logic (executor/index.ts lines 1149-1153) allows blocks
* with multiple inputs to execute when ANY valid input is available, supporting
* scenarios like agents that reference multiple router destinations.
*/
export class Routing {
private static readonly BEHAVIOR_MAP: Record<BlockCategory, RoutingBehavior> = {
[BlockCategory.ROUTING_BLOCK]: {
shouldActivateDownstream: true,
requiresActivePathCheck: false,
skipInSelectiveActivation: false,
shouldActivateDownstream: true, // Routing blocks activate their SELECTED targets (not all connected targets)
requiresActivePathCheck: false, // They don't need handler-level path checking - they CREATE the paths
skipInSelectiveActivation: false, // They participate in routing decisions, so don't skip during activation
},
[BlockCategory.FLOW_CONTROL]: {
shouldActivateDownstream: false,
requiresActivePathCheck: true,
skipInSelectiveActivation: true,
shouldActivateDownstream: false, // Flow control blocks manage their own complex internal activation
requiresActivePathCheck: true, // Their handlers need routing context for internal decision making
skipInSelectiveActivation: true, // Skip during selective activation to prevent bypassing routing decisions
},
[BlockCategory.REGULAR_BLOCK]: {
shouldActivateDownstream: true,
requiresActivePathCheck: false,
skipInSelectiveActivation: false,
shouldActivateDownstream: true, // Regular blocks activate all connected downstream blocks
requiresActivePathCheck: false, // They use universal path checking + dependency logic instead
skipInSelectiveActivation: false, // They participate in normal activation patterns
},
}
@@ -39,6 +78,7 @@ export class Routing {
// Flow control blocks
[BlockType.PARALLEL]: BlockCategory.FLOW_CONTROL,
[BlockType.LOOP]: BlockCategory.FLOW_CONTROL,
[BlockType.WORKFLOW]: BlockCategory.FLOW_CONTROL,
// Routing blocks
[BlockType.ROUTER]: BlockCategory.ROUTING_BLOCK,
@@ -50,7 +90,6 @@ export class Routing {
[BlockType.API]: BlockCategory.REGULAR_BLOCK,
[BlockType.EVALUATOR]: BlockCategory.REGULAR_BLOCK,
[BlockType.RESPONSE]: BlockCategory.REGULAR_BLOCK,
[BlockType.WORKFLOW]: BlockCategory.REGULAR_BLOCK,
[BlockType.STARTER]: BlockCategory.REGULAR_BLOCK,
}
@@ -67,16 +106,31 @@ export class Routing {
return Routing.getBehavior(blockType).shouldActivateDownstream
}
/**
* Determines if a block's HANDLER needs routing-aware logic.
* Note: This is NOT the same as universal path checking done by the executor.
*
* @param blockType The block type to check
* @returns true if the block handler should implement routing-aware behavior
*/
static requiresActivePathCheck(blockType: string): boolean {
return Routing.getBehavior(blockType).requiresActivePathCheck
}
/**
* Determines if a block type should be skipped during selective activation.
* Used to prevent certain block types from being prematurely activated
* when they should wait for explicit routing decisions.
*/
static shouldSkipInSelectiveActivation(blockType: string): boolean {
return Routing.getBehavior(blockType).skipInSelectiveActivation
}
/**
* Checks if a connection should be skipped during selective activation
* Checks if a connection should be skipped during selective activation.
*
* This prevents certain types of connections from triggering premature
* activation of blocks that should wait for explicit routing decisions.
*/
static shouldSkipConnection(sourceHandle: string | undefined, targetBlockType: string): boolean {
// Skip flow control specific connections (internal flow control handles)

View File

@@ -0,0 +1,253 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { Executor } from '@/executor/index'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Multi-Input Routing Scenarios', () => {
let workflow: SerializedWorkflow
let executor: Executor
beforeEach(() => {
workflow = {
version: '2.0',
blocks: [
{
id: 'start',
position: { x: 0, y: 0 },
metadata: { id: BlockType.STARTER, name: 'Start' },
config: { tool: BlockType.STARTER, params: {} },
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'router-1',
position: { x: 150, y: 0 },
metadata: { id: BlockType.ROUTER, name: 'Router 1' },
config: {
tool: BlockType.ROUTER,
params: {
prompt: 'if the input is x, go to function 1.\notherwise, go to function 2.\ny',
model: 'gpt-4o',
},
},
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'function-1',
position: { x: 300, y: -100 },
metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
config: {
tool: BlockType.FUNCTION,
params: { code: "return 'hi'" },
},
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'function-2',
position: { x: 300, y: 100 },
metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
config: {
tool: BlockType.FUNCTION,
params: { code: "return 'bye'" },
},
inputs: {},
outputs: {},
enabled: true,
},
{
id: 'agent-1',
position: { x: 500, y: 0 },
metadata: { id: BlockType.AGENT, name: 'Agent 1' },
config: {
tool: BlockType.AGENT,
params: {
systemPrompt: 'return the following in urdu roman english',
userPrompt: '<function1.result>\n<function2.result>',
model: 'gpt-4o',
},
},
inputs: {},
outputs: {},
enabled: true,
},
],
connections: [
{ source: 'start', target: 'router-1' },
{ source: 'router-1', target: 'function-1' },
{ source: 'router-1', target: 'function-2' },
{ source: 'function-1', target: 'agent-1' }, // Agent depends on function-1
{ source: 'function-2', target: 'agent-1' }, // Agent depends on function-2
],
loops: {},
parallels: {},
}
executor = new Executor(workflow, {}, {})
})
it('should handle multi-input target when router selects function-1', async () => {
// Test scenario: Router selects function-1, agent should still execute with function-1's output
const context = (executor as any).createExecutionContext('test-workflow', new Date())
// Step 1: Execute start block
context.executedBlocks.add('start')
context.activeExecutionPath.add('start')
context.activeExecutionPath.add('router-1')
// Step 2: Router selects function-1 (not function-2)
context.blockStates.set('router-1', {
output: {
selectedPath: {
blockId: 'function-1',
blockType: BlockType.FUNCTION,
blockTitle: 'Function 1',
},
},
executed: true,
executionTime: 876,
})
context.executedBlocks.add('router-1')
context.decisions.router.set('router-1', 'function-1')
// Update execution paths after router-1
const pathTracker = (executor as any).pathTracker
pathTracker.updateExecutionPaths(['router-1'], context)
// Verify only function-1 is active
expect(context.activeExecutionPath.has('function-1')).toBe(true)
expect(context.activeExecutionPath.has('function-2')).toBe(false)
// Step 3: Execute function-1
context.blockStates.set('function-1', {
output: { result: 'hi', stdout: '' },
executed: true,
executionTime: 66,
})
context.executedBlocks.add('function-1')
// Update paths after function-1
pathTracker.updateExecutionPaths(['function-1'], context)
// Step 4: Check agent-1 dependencies
const agent1Connections = workflow.connections.filter((conn) => conn.target === 'agent-1')
// Check dependencies for agent-1
const agent1DependenciesMet = (executor as any).checkDependencies(
agent1Connections,
context.executedBlocks,
context
)
// Step 5: Get next execution layer
const nextLayer = (executor as any).getNextExecutionLayer(context)
// CRITICAL TEST: Agent should be able to execute even though it has multiple inputs
// The key is that the dependency logic should handle this correctly:
// - function-1 executed and is selected → dependency met
// - function-2 not executed and not selected → dependency considered met (inactive source)
expect(agent1DependenciesMet).toBe(true)
expect(nextLayer).toContain('agent-1')
})
it('should handle multi-input target when router selects function-2', async () => {
  // Scenario: the router picks function-2; the agent must still be able to
  // run using function-2's output even though it has two possible inputs.
  const ctx = (executor as any).createExecutionContext('test-workflow', new Date())

  // Execute the start block, then have router-1 choose function-2.
  ctx.executedBlocks.add('start')
  ctx.activeExecutionPath.add('start')
  ctx.activeExecutionPath.add('router-1')
  ctx.blockStates.set('router-1', {
    output: {
      selectedPath: {
        blockId: 'function-2',
        blockType: BlockType.FUNCTION,
        blockTitle: 'Function 2',
      },
    },
    executed: true,
    executionTime: 876,
  })
  ctx.executedBlocks.add('router-1')
  ctx.decisions.router.set('router-1', 'function-2')

  const tracker = (executor as any).pathTracker
  tracker.updateExecutionPaths(['router-1'], ctx)

  // Only the selected branch may be active after the router runs.
  expect(ctx.activeExecutionPath.has('function-1')).toBe(false)
  expect(ctx.activeExecutionPath.has('function-2')).toBe(true)

  // Execute function-2 and propagate the paths again.
  ctx.blockStates.set('function-2', {
    output: { result: 'bye', stdout: '' },
    executed: true,
    executionTime: 66,
  })
  ctx.executedBlocks.add('function-2')
  tracker.updateExecutionPaths(['function-2'], ctx)

  // Every incoming connection of agent-1 should now count as satisfied.
  const incoming = workflow.connections.filter((conn) => conn.target === 'agent-1')
  const depsMet = (executor as any).checkDependencies(incoming, ctx.executedBlocks, ctx)

  // CRITICAL: the agent must be schedulable with function-2's output.
  const nextLayer = (executor as any).getNextExecutionLayer(ctx)
  expect(depsMet).toBe(true)
  expect(nextLayer).toContain('agent-1')
})
it('should verify the dependency logic for inactive sources', async () => {
  // Validates the multi-input dependency rule in isolation, without running
  // the full executor loop.
  const ctx = (executor as any).createExecutionContext('test-workflow', new Date())

  // Setup: router-1 ran and selected function-1, and function-1 also ran.
  for (const id of ['start', 'router-1', 'function-1']) {
    ctx.executedBlocks.add(id)
  }
  ctx.decisions.router.set('router-1', 'function-1')
  // agent-1 is active because its function-1 input was selected.
  for (const id of ['start', 'router-1', 'function-1', 'agent-1']) {
    ctx.activeExecutionPath.add(id)
  }

  const checkDeps = (executor as any).checkDependencies.bind(executor)

  // function-1 → agent-1: executed and on the active path, so it is met.
  const fromSelected = checkDeps(
    [{ source: 'function-1', target: 'agent-1' }],
    ctx.executedBlocks,
    ctx
  )
  // function-2 → agent-1: never executed, but its source is off the active
  // path, so the dependency counts as met (inactive-source rule).
  const fromInactive = checkDeps(
    [{ source: 'function-2', target: 'agent-1' }],
    ctx.executedBlocks,
    ctx
  )
  // Both edges together — the actual agent scenario.
  const fromBoth = checkDeps(
    [
      { source: 'function-1', target: 'agent-1' },
      { source: 'function-2', target: 'agent-1' },
    ],
    ctx.executedBlocks,
    ctx
  )

  // CRITICAL ASSERTIONS:
  expect(fromSelected).toBe(true) // Executed and active
  expect(fromInactive).toBe(true) // Inactive source, so considered met
  expect(fromBoth).toBe(true) // All dependencies should be met
})
})

View File

@@ -0,0 +1,305 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { PathTracker } from '@/executor/path/path'
import { Routing } from '@/executor/routing/routing'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
describe('Router → Workflow Block Execution Fix', () => {
  let workflow: SerializedWorkflow
  let pathTracker: PathTracker
  let mockContext: ExecutionContext

  /** Builds one serialized block entry for the test workflow. */
  const buildBlock = (id: string, type: string, name: string, x: number, y: number) => ({
    id,
    position: { x, y },
    metadata: { id: type, name },
    config: { tool: type, params: {} },
    inputs: {},
    outputs: {},
    enabled: true,
  })

  /** Records a router's selectedPath decision and re-derives the active paths. */
  const selectRoute = (
    routerId: string,
    targetId: string,
    targetType: string,
    targetTitle: string
  ) => {
    mockContext.blockStates.set(routerId, {
      output: {
        selectedPath: { blockId: targetId, blockType: targetType, blockTitle: targetTitle },
      },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add(routerId)
    pathTracker.updateExecutionPaths([routerId], mockContext)
  }

  /** Mirrors the executor's next-layer filter (executor/index.ts lines 991-994). */
  const eligibleBlockIds = () =>
    workflow.blocks
      .filter(
        (block) =>
          !mockContext.executedBlocks.has(block.id) &&
          block.enabled !== false &&
          mockContext.activeExecutionPath.has(block.id)
      )
      .map((b) => b.id)

  beforeEach(() => {
    // Topology: starter → router-1 → {function-1 | router-2 → {function-2 | workflow-2}}
    workflow = {
      version: '2.0',
      blocks: [
        buildBlock('starter', BlockType.STARTER, 'Start', 0, 0),
        buildBlock('router-1', BlockType.ROUTER, 'Router 1', 100, 0),
        buildBlock('function-1', BlockType.FUNCTION, 'Function 1', 200, -100),
        buildBlock('router-2', BlockType.ROUTER, 'Router 2', 200, 0),
        buildBlock('function-2', BlockType.FUNCTION, 'Function 2', 300, -50),
        buildBlock('workflow-2', BlockType.WORKFLOW, 'Workflow 2', 300, 50),
      ],
      connections: [
        { source: 'starter', target: 'router-1' },
        { source: 'router-1', target: 'function-1' },
        { source: 'router-1', target: 'router-2' },
        { source: 'router-2', target: 'function-2' },
        { source: 'router-2', target: 'workflow-2' },
      ],
      loops: {},
      parallels: {},
    }
    pathTracker = new PathTracker(workflow)
    mockContext = {
      workflowId: 'test-workflow',
      blockStates: new Map(),
      blockLogs: [],
      metadata: { duration: 0 },
      environmentVariables: {},
      decisions: { router: new Map(), condition: new Map() },
      loopIterations: new Map(),
      loopItems: new Map(),
      completedLoops: new Set(),
      executedBlocks: new Set(),
      activeExecutionPath: new Set(),
      workflow,
    }
    // The starter has already run; routing begins at router-1.
    mockContext.executedBlocks.add('starter')
    mockContext.activeExecutionPath.add('starter')
    mockContext.activeExecutionPath.add('router-1')
  })

  it('should categorize workflow blocks as flow control blocks requiring active path checks', () => {
    // Workflow blocks must participate in routing like other flow-control blocks.
    expect(Routing.getCategory(BlockType.WORKFLOW)).toBe('flow-control')
    expect(Routing.requiresActivePathCheck(BlockType.WORKFLOW)).toBe(true)
    expect(Routing.shouldSkipInSelectiveActivation(BlockType.WORKFLOW)).toBe(true)
  })

  it('should prevent workflow blocks from executing when not selected by router', () => {
    // Recreates the exact bug scenario from the CSV data.
    // Step 1: Router 1 selects router-2 (not function-1).
    selectRoute('router-1', 'router-2', BlockType.ROUTER, 'Router 2')
    expect(mockContext.decisions.router.get('router-1')).toBe('router-2')
    expect(mockContext.activeExecutionPath.has('router-2')).toBe(true)
    expect(mockContext.activeExecutionPath.has('function-1')).toBe(false)
    // CRITICAL: the workflow block must not be activated yet.
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)

    // Step 2: Router 2 selects function-2 (NOT workflow-2).
    selectRoute('router-2', 'function-2', BlockType.FUNCTION, 'Function 2')
    expect(mockContext.decisions.router.get('router-2')).toBe('function-2')
    expect(mockContext.activeExecutionPath.has('function-2')).toBe(true)
    // CRITICAL: workflow-2 must still be inactive (this was the bug!).
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)

    // Step 3: only function-2 may appear in the next execution layer.
    const blockIds = eligibleBlockIds()
    expect(blockIds).toContain('function-2')
    expect(blockIds).not.toContain('workflow-2')

    // The path tracker must agree that workflow-2 is inactive.
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    expect(pathTracker.isInActivePath('workflow-2', mockContext)).toBe(false)
  })

  it('should allow workflow blocks to execute when selected by router', () => {
    // Positive case: the workflow block runs when a router actually picks it.
    // Step 1: Router 1 selects router-2.
    selectRoute('router-1', 'router-2', BlockType.ROUTER, 'Router 2')

    // Step 2: Router 2 selects workflow-2 (NOT function-2).
    selectRoute('router-2', 'workflow-2', BlockType.WORKFLOW, 'Workflow 2')
    expect(mockContext.decisions.router.get('router-2')).toBe('workflow-2')
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(true)
    expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)

    // Step 3: the next layer contains workflow-2 and not function-2.
    const blockIds = eligibleBlockIds()
    expect(blockIds).toContain('workflow-2')
    expect(blockIds).not.toContain('function-2')
  })

  it('should handle multiple sequential routers with workflow blocks correctly', () => {
    // From the bug report: "The issue only seems to happen when there are
    // multiple routing/conditional blocks". The CSV trace showed Router 1 →
    // Function 1, Router 2 → Function 2, yet Workflow 2 executed anyway.
    // Step 1: Router 1 selects function-1 (not router-2).
    selectRoute('router-1', 'function-1', BlockType.FUNCTION, 'Function 1')
    expect(mockContext.activeExecutionPath.has('function-1')).toBe(true)
    expect(mockContext.activeExecutionPath.has('router-2')).toBe(false)
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)

    // Step 2: execute function-1.
    mockContext.blockStates.set('function-1', {
      output: { result: 'hi', stdout: '' },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('function-1')

    // Step 3: nothing on the unselected branches may be scheduled.
    const blockIds = eligibleBlockIds()
    expect(blockIds).not.toContain('router-2')
    expect(blockIds).not.toContain('workflow-2')
    expect(blockIds).not.toContain('function-2')
    expect(mockContext.activeExecutionPath.has('router-2')).toBe(false)
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)
  })
})

View File

@@ -1,4 +1,4 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { afterEach, describe, expect, it, vi } from 'vitest'
import {
cn,
convertScheduleOptionsToCron,
@@ -34,9 +34,11 @@ vi.mock('crypto', () => ({
}),
}))
beforeEach(() => {
process.env.ENCRYPTION_KEY = '1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef'
})
vi.mock('@/lib/env', () => ({
env: {
ENCRYPTION_KEY: '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef',
},
}))
afterEach(() => {
vi.clearAllMocks()

View File

@@ -42,7 +42,6 @@ export const ollamaProvider: ProviderConfig = {
},
executeRequest: async (request: ProviderRequest): Promise<ProviderResponse> => {
console.log(request)
logger.info('Preparing Ollama request', {
model: request.model,
hasSystemPrompt: !!request.systemPrompt,

Binary file not shown.

Before

Width:  |  Height:  |  Size: 49 MiB

After

Width:  |  Height:  |  Size: 35 MiB

View File

@@ -72,7 +72,7 @@ function validateVariable(variable: Variable): string | undefined {
return undefined // Valid object
} catch (e) {
console.log('Object parsing error:', e)
logger.error('Object parsing error:', e)
return 'Invalid object syntax'
}
case 'array':

View File

@@ -1,6 +1,7 @@
import type { Edge } from 'reactflow'
import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console-logger'
import { getBlock } from '@/blocks'
import { resolveOutputType } from '@/blocks/utils'
import { pushHistory, type WorkflowStoreWithHistory, withHistory } from '../middleware'
@@ -11,6 +12,8 @@ import { mergeSubblockState } from '../utils'
import type { Position, SubBlockState, SyncControl, WorkflowState } from './types'
import { generateLoopBlocks, generateParallelBlocks } from './utils'
const logger = createLogger('WorkflowStore')
const initialState = {
blocks: {},
edges: [],
@@ -209,11 +212,11 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
updateParentId: (id: string, parentId: string, extent: 'parent') => {
const block = get().blocks[id]
if (!block) {
console.warn(`Cannot set parent: Block ${id} not found`)
logger.warn(`Cannot set parent: Block ${id} not found`)
return
}
console.log('UpdateParentId called:', {
logger.info('UpdateParentId called:', {
blockId: id,
blockName: block.name,
blockType: block.type,
@@ -224,7 +227,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
// Skip if the parent ID hasn't changed
if (block.data?.parentId === parentId) {
console.log('Parent ID unchanged, skipping update')
logger.info('Parent ID unchanged, skipping update')
return
}
@@ -260,7 +263,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
parallels: { ...get().parallels },
}
console.log('[WorkflowStore/updateParentId] Updated parentId relationship:', {
logger.info('[WorkflowStore/updateParentId] Updated parentId relationship:', {
blockId: id,
newParentId: parentId || 'None (removed parent)',
keepingPosition: absolutePosition,
@@ -306,7 +309,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
// Start recursive search from the target block
findAllDescendants(id)
console.log('[WorkflowStore/removeBlock] Found blocks to remove:', {
logger.info('Found blocks to remove:', {
targetId: id,
totalBlocksToRemove: Array.from(blocksToRemove),
includesHierarchy: blocksToRemove.size > 1,
@@ -390,7 +393,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
// Validate the edge exists
const edgeToRemove = get().edges.find((edge) => edge.id === edgeId)
if (!edgeToRemove) {
console.warn(`Attempted to remove non-existent edge: ${edgeId}`)
logger.warn(`Attempted to remove non-existent edge: ${edgeId}`)
return
}
@@ -810,7 +813,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
if (!activeWorkflowId) {
console.error('Cannot revert: no active workflow ID')
logger.error('Cannot revert: no active workflow ID')
return
}
@@ -883,13 +886,13 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
if (!response.ok) {
const errorData = await response.json()
console.error('Failed to persist revert to deployed state:', errorData.error)
logger.error('Failed to persist revert to deployed state:', errorData.error)
// Don't throw error to avoid breaking the UI, but log it
} else {
console.log('Successfully persisted revert to deployed state')
logger.info('Successfully persisted revert to deployed state')
}
} catch (error) {
console.error('Error calling revert to deployed API:', error)
logger.error('Error calling revert to deployed API:', error)
// Don't throw error to avoid breaking the UI
}
},

View File

@@ -1,6 +1,9 @@
import { createLogger } from '@/lib/logs/console-logger'
import type { TypeformInsightsParams, TypeformInsightsResponse } from '@/tools/typeform/types'
import type { ToolConfig } from '@/tools/types'
const logger = createLogger('TypeformInsightsTool')
export const insightsTool: ToolConfig<TypeformInsightsParams, TypeformInsightsResponse> = {
id: 'typeform_insights',
name: 'Typeform Insights',
@@ -38,7 +41,7 @@ export const insightsTool: ToolConfig<TypeformInsightsParams, TypeformInsightsRe
try {
const errorData = await response.json()
console.log('Typeform API error response:', JSON.stringify(errorData, null, 2))
logger.info('Typeform API error response:', JSON.stringify(errorData, null, 2))
if (errorData?.message) {
errorMessage = errorData.message
@@ -68,7 +71,7 @@ Details from API: ${errorMessage}${errorDetails}`,
}
} catch (e) {
// If we can't parse the error as JSON, just use the status text
console.log('Error parsing Typeform API error:', e)
logger.error('Error parsing Typeform API error:', e)
}
throw new Error(`Typeform API error (${response.status}): ${errorMessage}${errorDetails}`)